##// END OF EJS Templates
py3: use the email.parser module to parse email messages
Pulkit Goyal
r35651:a981ab2a default
parent child Browse files
Show More
@@ -1,352 +1,352 b''
1 1 # gnuarch.py - GNU Arch support for the convert extension
2 2 #
3 3 # Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
4 4 # and others
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8 from __future__ import absolute_import
9 9
10 import email
10 import email.parser as emailparser
11 11 import os
12 12 import shutil
13 13 import stat
14 14 import tempfile
15 15
16 16 from mercurial.i18n import _
17 17 from mercurial import (
18 18 encoding,
19 19 error,
20 20 util,
21 21 )
22 22 from . import common
23 23
24 24 class gnuarch_source(common.converter_source, common.commandline):
25 25
26 26 class gnuarch_rev(object):
27 27 def __init__(self, rev):
28 28 self.rev = rev
29 29 self.summary = ''
30 30 self.date = None
31 31 self.author = ''
32 32 self.continuationof = None
33 33 self.add_files = []
34 34 self.mod_files = []
35 35 self.del_files = []
36 36 self.ren_files = {}
37 37 self.ren_dirs = {}
38 38
    def __init__(self, ui, repotype, path, revs=None):
        """Initialize the GNU Arch source over a working tree at *path*.

        Raises common.NoRepo when *path* lacks an '{arch}' control
        directory and error.Abort when neither 'baz' nor 'tla' is
        installed.
        """
        super(gnuarch_source, self).__init__(ui, repotype, path, revs=revs)

        if not os.path.exists(os.path.join(path, '{arch}')):
            raise common.NoRepo(_("%s does not look like a GNU Arch repository")
                              % path)

        # Could use checktool, but we want to check for baz or tla.
        self.execmd = None
        if util.findexe('baz'):
            self.execmd = 'baz'
        else:
            if util.findexe('tla'):
                self.execmd = 'tla'
            else:
                raise error.Abort(_('cannot find a GNU Arch tool'))

        common.commandline.__init__(self, ui, self.execmd)

        self.path = os.path.realpath(path)
        self.tmppath = None

        self.treeversion = None
        self.lastrev = None
        self.changes = {}  # rev id -> gnuarch_rev
        self.parents = {}  # rev id -> list of parent rev ids
        self.tags = {}
        # cat-log output is RFC 822-style, so an email parser handles it.
        self.catlogparser = emailparser.Parser()
        self.encoding = encoding.encoding
        self.archives = []
69 69
    def before(self):
        """Scan tree history and populate changes/parents/archives.

        Walks tree versions backwards, following continuation-of
        headers across branches/tags, recording every revision and its
        parent link for the later conversion passes.
        """
        # Get registered archives
        self.archives = [i.rstrip('\n')
                         for i in self.runlines0('archives', '-n')]

        if self.execmd == 'tla':
            output = self.run0('tree-version', self.path)
        else:
            output = self.run0('tree-version', '-d', self.path)
        self.treeversion = output.strip()

        # Get name of temporary directory
        version = self.treeversion.split('/')
        self.tmppath = os.path.join(tempfile.gettempdir(),
                                    'hg-%s' % version[1])

        # Generate parents dictionary
        self.parents[None] = []
        treeversion = self.treeversion
        child = None
        while treeversion:
            self.ui.status(_('analyzing tree version %s...\n') % treeversion)

            archive = treeversion.split('/')[0]
            if archive not in self.archives:
                self.ui.status(_('tree analysis stopped because it points to '
                                 'an unregistered archive %s...\n') % archive)
                break

            # Get the complete list of revisions for that tree version
            output, status = self.runlines('revisions', '-r', '-f', treeversion)
            self.checkexit(status, 'failed retrieving revisions for %s'
                           % treeversion)

            # No new iteration unless a revision has a continuation-of header
            treeversion = None

            for l in output:
                rev = l.strip()
                self.changes[rev] = self.gnuarch_rev(rev)
                self.parents[rev] = []

                # Read author, date and summary
                catlog, status = self.run('cat-log', '-d', self.path, rev)
                if status:
                    # Fall back to the archive-wide log when the tree
                    # copy of the log is unavailable.
                    catlog = self.run0('cat-archive-log', rev)
                self._parsecatlog(catlog, rev)

                # Populate the parents map
                self.parents[child].append(rev)

                # Keep track of the current revision as the child of the next
                # revision scanned
                child = rev

                # Check if we have to follow the usual incremental history
                # or if we have to 'jump' to a different treeversion given
                # by the continuation-of header.
                if self.changes[rev].continuationof:
                    treeversion = '--'.join(
                        self.changes[rev].continuationof.split('--')[:-1])
                    break

                # If we reached a base-0 revision w/o any continuation-of
                # header, it means the tree history ends here.
                if rev[-6:] == 'base-0':
                    break
137 137
    def after(self):
        """Remove the temporary working tree created during conversion."""
        self.ui.debug('cleaning up %s\n' % self.tmppath)
        shutil.rmtree(self.tmppath, ignore_errors=True)
141 141
    def getheads(self):
        """Return the head revisions (children recorded under the None key)."""
        return self.parents[None]
144 144
145 145 def getfile(self, name, rev):
146 146 if rev != self.lastrev:
147 147 raise error.Abort(_('internal calling inconsistency'))
148 148
149 149 if not os.path.lexists(os.path.join(self.tmppath, name)):
150 150 return None, None
151 151
152 152 return self._getfile(name, rev)
153 153
154 154 def getchanges(self, rev, full):
155 155 if full:
156 156 raise error.Abort(_("convert from arch does not support --full"))
157 157 self._update(rev)
158 158 changes = []
159 159 copies = {}
160 160
161 161 for f in self.changes[rev].add_files:
162 162 changes.append((f, rev))
163 163
164 164 for f in self.changes[rev].mod_files:
165 165 changes.append((f, rev))
166 166
167 167 for f in self.changes[rev].del_files:
168 168 changes.append((f, rev))
169 169
170 170 for src in self.changes[rev].ren_files:
171 171 to = self.changes[rev].ren_files[src]
172 172 changes.append((src, rev))
173 173 changes.append((to, rev))
174 174 copies[to] = src
175 175
176 176 for src in self.changes[rev].ren_dirs:
177 177 to = self.changes[rev].ren_dirs[src]
178 178 chgs, cps = self._rendirchanges(src, to)
179 179 changes += [(f, rev) for f in chgs]
180 180 copies.update(cps)
181 181
182 182 self.lastrev = rev
183 183 return sorted(set(changes)), copies, set()
184 184
    def getcommit(self, rev):
        """Build a common.commit from the metadata parsed for *rev*."""
        changes = self.changes[rev]
        return common.commit(author=changes.author, date=changes.date,
                             desc=changes.summary, parents=self.parents[rev],
                             rev=rev)
190 190
    def gettags(self):
        """Return the tag name -> revision mapping collected so far."""
        return self.tags
193 193
    def _execute(self, cmd, *args, **kwargs):
        """Run an Arch command silently; return its os.system() status."""
        cmdline = [self.execmd, cmd]
        cmdline += args
        cmdline = [util.shellquote(arg) for arg in cmdline]
        # Discard stdout and stderr; only the exit status matters here.
        cmdline += ['>', os.devnull, '2>', os.devnull]
        cmdline = util.quotecommand(' '.join(cmdline))
        self.ui.debug(cmdline, '\n')
        return os.system(cmdline)
202 202
    def _update(self, rev):
        """Bring the temporary tree to *rev* and record its changeset.

        Tries an incremental 'replay' first and falls back to a full
        checkout of *rev* when that fails.
        """
        self.ui.debug('applying revision %s...\n' % rev)
        changeset, status = self.runlines('replay', '-d', self.tmppath,
                                          rev)
        if status:
            # Something went wrong while merging (baz or tla
            # issue?), get latest revision and try from there
            shutil.rmtree(self.tmppath, ignore_errors=True)
            self._obtainrevision(rev)
        else:
            old_rev = self.parents[rev][0]
            self.ui.debug('computing changeset between %s and %s...\n'
                          % (old_rev, rev))
            self._parsechangeset(changeset, rev)
217 217
218 218 def _getfile(self, name, rev):
219 219 mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
220 220 if stat.S_ISLNK(mode):
221 221 data = os.readlink(os.path.join(self.tmppath, name))
222 222 if mode:
223 223 mode = 'l'
224 224 else:
225 225 mode = ''
226 226 else:
227 227 data = open(os.path.join(self.tmppath, name), 'rb').read()
228 228 mode = (mode & 0o111) and 'x' or ''
229 229 return data, mode
230 230
231 231 def _exclude(self, name):
232 232 exclude = ['{arch}', '.arch-ids', '.arch-inventory']
233 233 for exc in exclude:
234 234 if name.find(exc) != -1:
235 235 return True
236 236 return False
237 237
238 238 def _readcontents(self, path):
239 239 files = []
240 240 contents = os.listdir(path)
241 241 while len(contents) > 0:
242 242 c = contents.pop()
243 243 p = os.path.join(path, c)
244 244 # os.walk could be used, but here we avoid internal GNU
245 245 # Arch files and directories, thus saving a lot time.
246 246 if not self._exclude(p):
247 247 if os.path.isdir(p):
248 248 contents += [os.path.join(c, f) for f in os.listdir(p)]
249 249 else:
250 250 files.append(c)
251 251 return files
252 252
253 253 def _rendirchanges(self, src, dest):
254 254 changes = []
255 255 copies = {}
256 256 files = self._readcontents(os.path.join(self.tmppath, dest))
257 257 for f in files:
258 258 s = os.path.join(src, f)
259 259 d = os.path.join(dest, f)
260 260 changes.append(s)
261 261 changes.append(d)
262 262 copies[d] = s
263 263 return changes, copies
264 264
    def _obtainrevision(self, rev):
        """Check out *rev* from scratch into the temporary tree and
        register every file found there as added."""
        self.ui.debug('obtaining revision %s...\n' % rev)
        output = self._execute('get', rev, self.tmppath)
        self.checkexit(output)
        self.ui.debug('analyzing revision %s...\n' % rev)
        files = self._readcontents(self.tmppath)
        self.changes[rev].add_files += files
272 272
273 273 def _stripbasepath(self, path):
274 274 if path.startswith('./'):
275 275 return path[2:]
276 276 return path
277 277
    def _parsecatlog(self, data, rev):
        """Parse a cat-log message and fill self.changes[rev] with the
        date, author, summary and continuation-of fields.

        Aborts the conversion when the message cannot be parsed.
        """
        try:
            # cat-log output is RFC 822-style, hence the email parser.
            catlog = self.catlogparser.parsestr(data)

            # Commit date
            self.changes[rev].date = util.datestr(
                util.strdate(catlog['Standard-date'],
                             '%Y-%m-%d %H:%M:%S'))

            # Commit author
            self.changes[rev].author = self.recode(catlog['Creator'])

            # Commit description
            self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
                                                     catlog.get_payload()))
            self.changes[rev].summary = self.recode(self.changes[rev].summary)

            # Commit revision origin when dealing with a branch or tag
            if 'Continuation-of' in catlog:
                self.changes[rev].continuationof = self.recode(
                    catlog['Continuation-of'])
        except Exception:
            # Deliberately broad: any malformed field aborts cleanly
            # with a single user-facing message.
            raise error.Abort(_('could not parse cat-log of %s') % rev)
301 301
    def _parsechangeset(self, data, rev):
        """Classify each line of a 'replay' changeset into the add/del/
        mod/rename collections of self.changes[rev].

        The prefix tests are ordered from most to least specific, so
        'Mb' and 'M->' must be checked before plain 'M'.
        """
        for l in data:
            l = l.strip()
            # Added file (ignore added directory)
            if l.startswith('A') and not l.startswith('A/'):
                file = self._stripbasepath(l[1:].strip())
                if not self._exclude(file):
                    self.changes[rev].add_files.append(file)
            # Deleted file (ignore deleted directory)
            elif l.startswith('D') and not l.startswith('D/'):
                file = self._stripbasepath(l[1:].strip())
                if not self._exclude(file):
                    self.changes[rev].del_files.append(file)
            # Modified binary file
            elif l.startswith('Mb'):
                file = self._stripbasepath(l[2:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Modified link
            elif l.startswith('M->'):
                file = self._stripbasepath(l[3:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Modified file
            elif l.startswith('M'):
                file = self._stripbasepath(l[1:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Renamed file (or link)
            elif l.startswith('=>'):
                # Names are space-separated; fall back to a tab split
                # when that does not yield two fields.
                files = l[2:].strip().split(' ')
                if len(files) == 1:
                    files = l[2:].strip().split('\t')
                src = self._stripbasepath(files[0])
                dst = self._stripbasepath(files[1])
                if not self._exclude(src) and not self._exclude(dst):
                    self.changes[rev].ren_files[src] = dst
            # Conversion from file to link or from link to file (modified)
            elif l.startswith('ch'):
                file = self._stripbasepath(l[2:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Renamed directory
            elif l.startswith('/>'):
                dirs = l[2:].strip().split(' ')
                if len(dirs) == 1:
                    dirs = l[2:].strip().split('\t')
                src = self._stripbasepath(dirs[0])
                dst = self._stripbasepath(dirs[1])
                if not self._exclude(src) and not self._exclude(dst):
                    self.changes[rev].ren_dirs[src] = dst
@@ -1,484 +1,485 b''
1 1 # notify.py - email notifications for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''hooks for sending email push notifications
9 9
10 10 This extension implements hooks to send email notifications when
11 11 changesets are sent from or received by the local repository.
12 12
13 13 First, enable the extension as explained in :hg:`help extensions`, and
14 14 register the hook you want to run. ``incoming`` and ``changegroup`` hooks
15 15 are run when changesets are received, while ``outgoing`` hooks are for
16 16 changesets sent to another repository::
17 17
18 18 [hooks]
19 19 # one email for each incoming changeset
20 20 incoming.notify = python:hgext.notify.hook
21 21 # one email for all incoming changesets
22 22 changegroup.notify = python:hgext.notify.hook
23 23
24 24 # one email for all outgoing changesets
25 25 outgoing.notify = python:hgext.notify.hook
26 26
27 27 This registers the hooks. To enable notification, subscribers must
28 28 be assigned to repositories. The ``[usersubs]`` section maps multiple
29 29 repositories to a given recipient. The ``[reposubs]`` section maps
30 30 multiple recipients to a single repository::
31 31
32 32 [usersubs]
33 33 # key is subscriber email, value is a comma-separated list of repo patterns
34 34 user@host = pattern
35 35
36 36 [reposubs]
37 37 # key is repo pattern, value is a comma-separated list of subscriber emails
38 38 pattern = user@host
39 39
40 40 A ``pattern`` is a ``glob`` matching the absolute path to a repository,
41 41 optionally combined with a revset expression. A revset expression, if
42 42 present, is separated from the glob by a hash. Example::
43 43
44 44 [reposubs]
45 45 */widgets#branch(release) = qa-team@example.com
46 46
47 47 This sends to ``qa-team@example.com`` whenever a changeset on the ``release``
48 48 branch triggers a notification in any repository ending in ``widgets``.
49 49
50 50 In order to place them under direct user management, ``[usersubs]`` and
51 51 ``[reposubs]`` sections may be placed in a separate ``hgrc`` file and
52 52 incorporated by reference::
53 53
54 54 [notify]
55 55 config = /path/to/subscriptionsfile
56 56
57 57 Notifications will not be sent until the ``notify.test`` value is set
58 58 to ``False``; see below.
59 59
60 60 Notifications content can be tweaked with the following configuration entries:
61 61
62 62 notify.test
63 63 If ``True``, print messages to stdout instead of sending them. Default: True.
64 64
65 65 notify.sources
66 66 Space-separated list of change sources. Notifications are activated only
67 67 when a changeset's source is in this list. Sources may be:
68 68
69 69 :``serve``: changesets received via http or ssh
70 70 :``pull``: changesets received via ``hg pull``
71 71 :``unbundle``: changesets received via ``hg unbundle``
72 72 :``push``: changesets sent or received via ``hg push``
73 73 :``bundle``: changesets sent via ``hg unbundle``
74 74
75 75 Default: serve.
76 76
77 77 notify.strip
78 78 Number of leading slashes to strip from url paths. By default, notifications
79 79 reference repositories with their absolute path. ``notify.strip`` lets you
80 80 turn them into relative paths. For example, ``notify.strip=3`` will change
81 81 ``/long/path/repository`` into ``repository``. Default: 0.
82 82
83 83 notify.domain
84 84 Default email domain for sender or recipients with no explicit domain.
85 85
86 86 notify.style
87 87 Style file to use when formatting emails.
88 88
89 89 notify.template
90 90 Template to use when formatting emails.
91 91
92 92 notify.incoming
93 93 Template to use when run as an incoming hook, overriding ``notify.template``.
94 94
95 95 notify.outgoing
96 96 Template to use when run as an outgoing hook, overriding ``notify.template``.
97 97
98 98 notify.changegroup
99 99 Template to use when running as a changegroup hook, overriding
100 100 ``notify.template``.
101 101
102 102 notify.maxdiff
103 103 Maximum number of diff lines to include in notification email. Set to 0
104 104 to disable the diff, or -1 to include all of it. Default: 300.
105 105
106 106 notify.maxsubject
107 107 Maximum number of characters in email's subject line. Default: 67.
108 108
109 109 notify.diffstat
110 110 Set to True to include a diffstat before diff content. Default: True.
111 111
112 112 notify.merge
113 113 If True, send notifications for merge changesets. Default: True.
114 114
115 115 notify.mbox
116 116 If set, append mails to this mbox file instead of sending. Default: None.
117 117
118 118 notify.fromauthor
119 119 If set, use the committer of the first changeset in a changegroup for
120 120 the "From" field of the notification mail. If not set, take the user
121 121 from the pushing repo. Default: False.
122 122
123 123 If set, the following entries will also be used to customize the
124 124 notifications:
125 125
126 126 email.from
127 127 Email ``From`` address to use if none can be found in the generated
128 128 email content.
129 129
130 130 web.baseurl
131 131 Root repository URL to combine with repository paths when making
132 132 references. See also ``notify.strip``.
133 133
134 134 '''
135 135 from __future__ import absolute_import
136 136
137 137 import email
138 import email.parser as emailparser
138 139 import fnmatch
139 140 import socket
140 141 import time
141 142
142 143 from mercurial.i18n import _
143 144 from mercurial import (
144 145 cmdutil,
145 146 error,
146 147 mail,
147 148 patch,
148 149 registrar,
149 150 util,
150 151 )
151 152
152 153 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
153 154 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
154 155 # be specifying the version(s) of Mercurial they are tested with, or
155 156 # leave the attribute unspecified.
156 157 testedwith = 'ships-with-hg-core'
157 158
# Declare every 'notify' config knob with its default so that the
# ui.config() calls below are registered; see the module docstring for
# the meaning of each option.
configtable = {}
configitem = registrar.configitem(configtable)

configitem('notify', 'changegroup',
    default=None,
)
configitem('notify', 'config',
    default=None,
)
configitem('notify', 'diffstat',
    default=True,
)
configitem('notify', 'domain',
    default=None,
)
configitem('notify', 'fromauthor',
    default=None,
)
configitem('notify', 'incoming',
    default=None,
)
configitem('notify', 'maxdiff',
    default=300,
)
configitem('notify', 'maxsubject',
    default=67,
)
configitem('notify', 'mbox',
    default=None,
)
configitem('notify', 'merge',
    default=True,
)
configitem('notify', 'outgoing',
    default=None,
)
configitem('notify', 'sources',
    default='serve',
)
configitem('notify', 'strip',
    default=0,
)
configitem('notify', 'style',
    default=None,
)
configitem('notify', 'template',
    default=None,
)
configitem('notify', 'test',
    default=True,
)
209 210
210 211 # template for single changeset can include email headers.
211 212 single_template = '''
212 213 Subject: changeset in {webroot}: {desc|firstline|strip}
213 214 From: {author}
214 215
215 216 changeset {node|short} in {root}
216 217 details: {baseurl}{webroot}?cmd=changeset;node={node|short}
217 218 description:
218 219 \t{desc|tabindent|strip}
219 220 '''.lstrip()
220 221
221 222 # template for multiple changesets should not contain email headers,
222 223 # because only first set of headers will be used and result will look
223 224 # strange.
224 225 multiple_template = '''
225 226 changeset {node|short} in {root}
226 227 details: {baseurl}{webroot}?cmd=changeset;node={node|short}
227 228 summary: {desc|firstline}
228 229 '''
229 230
230 231 deftemplates = {
231 232 'changegroup': multiple_template,
232 233 }
233 234
234 235 class notifier(object):
235 236 '''email notification class.'''
236 237
    def __init__(self, ui, repo, hooktype):
        """Read the notify configuration and prepare a changeset
        templater appropriate for *hooktype*."""
        self.ui = ui
        cfg = self.ui.config('notify', 'config')
        if cfg:
            # Pull subscriber lists from an external file when configured.
            self.ui.readconfig(cfg, sections=['usersubs', 'reposubs'])
        self.repo = repo
        self.stripcount = int(self.ui.config('notify', 'strip'))
        self.root = self.strip(self.repo.root)
        self.domain = self.ui.config('notify', 'domain')
        self.mbox = self.ui.config('notify', 'mbox')
        self.test = self.ui.configbool('notify', 'test')
        self.charsets = mail._charsets(self.ui)
        self.subs = self.subscribers()
        self.merge = self.ui.configbool('notify', 'merge')

        mapfile = None
        # Resolution order: hook-specific template, generic template,
        # style file, then the built-in defaults.
        template = (self.ui.config('notify', hooktype) or
                    self.ui.config('notify', 'template'))
        if not template:
            mapfile = self.ui.config('notify', 'style')
        if not mapfile and not template:
            template = deftemplates.get(hooktype) or single_template
        spec = cmdutil.logtemplatespec(template, mapfile)
        self.t = cmdutil.changeset_templater(self.ui, self.repo, spec,
                                             False, None, False)
262 263
263 264 def strip(self, path):
264 265 '''strip leading slashes from local path, turn into web-safe path.'''
265 266
266 267 path = util.pconvert(path)
267 268 count = self.stripcount
268 269 while count > 0:
269 270 c = path.find('/')
270 271 if c == -1:
271 272 break
272 273 path = path[c + 1:]
273 274 count -= 1
274 275 return path
275 276
276 277 def fixmail(self, addr):
277 278 '''try to clean up email addresses.'''
278 279
279 280 addr = util.email(addr.strip())
280 281 if self.domain:
281 282 a = addr.find('@localhost')
282 283 if a != -1:
283 284 addr = addr[:a]
284 285 if '@' not in addr:
285 286 return addr + '@' + self.domain
286 287 return addr
287 288
    def subscribers(self):
        '''return list of email addresses of subscribers to this repo.'''
        subs = set()
        # [usersubs]: subscriber address -> comma-separated repo patterns.
        for user, pats in self.ui.configitems('usersubs'):
            for pat in pats.split(','):
                if '#' in pat:
                    # Optional '#revset' suffix restricting the pattern.
                    pat, revs = pat.split('#', 1)
                else:
                    revs = None
                if fnmatch.fnmatch(self.repo.root, pat.strip()):
                    subs.add((self.fixmail(user), revs))
        # [reposubs]: repo pattern -> comma-separated subscriber addresses.
        for pat, users in self.ui.configitems('reposubs'):
            if '#' in pat:
                pat, revs = pat.split('#', 1)
            else:
                revs = None
            if fnmatch.fnmatch(self.repo.root, pat):
                for user in users.split(','):
                    subs.add((self.fixmail(user), revs))
        return [(mail.addressencode(self.ui, s, self.charsets, self.test), r)
                for s, r in sorted(subs)]
309 310
    def node(self, ctx, **props):
        '''format one changeset, unless it is a suppressed merge.'''
        # Returns False for merges when notify.merge is off; formatted
        # output goes to the ui buffer pushed by hook().
        if not self.merge and len(ctx.parents()) > 1:
            return False
        self.t.show(ctx, changes=ctx.changeset(),
                    baseurl=self.ui.config('web', 'baseurl'),
                    root=self.repo.root, webroot=self.root, **props)
        return True
318 319
319 320 def skipsource(self, source):
320 321 '''true if incoming changes from this source should be skipped.'''
321 322 ok_sources = self.ui.config('notify', 'sources').split()
322 323 return source not in ok_sources
323 324
    def send(self, ctx, count, data):
        '''send message.'''
        # Select subscribers by revset
        subs = set()
        for sub, spec in self.subs:
            if spec is None:
                subs.add(sub)
                continue
            revs = self.repo.revs('%r and %d:', spec, ctx.rev())
            if len(revs):
                subs.add(sub)
                continue
        if len(subs) == 0:
            self.ui.debug('notify: no subscribers to selected repo '
                          'and revset\n')
            return

        # The template output is expected to be an RFC 822-style
        # message; parse it back into a message object.
        p = emailparser.Parser()
        try:
            msg = p.parsestr(data)
        # NOTE(review): email.Errors is the Python 2 spelling; Python 3
        # uses email.errors -- confirm against the tree's py3 status.
        except email.Errors.MessageParseError as inst:
            raise error.Abort(inst)

        # store sender and subject
        sender, subject = msg['From'], msg['Subject']
        del msg['From'], msg['Subject']

        if not msg.is_multipart():
            # create fresh mime message from scratch
            # (multipart templates must take care of this themselves)
            headers = msg.items()
            payload = msg.get_payload()
            # for notification prefer readability over data precision
            msg = mail.mimeencode(self.ui, payload, self.charsets, self.test)
            # reinstate custom headers
            for k, v in headers:
                msg[k] = v

        msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")

        # try to make subject line exist and be useful
        if not subject:
            if count > 1:
                subject = _('%s: %d new changesets') % (self.root, count)
            else:
                s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
                subject = '%s: %s' % (self.root, s)
        maxsubject = int(self.ui.config('notify', 'maxsubject'))
        if maxsubject:
            subject = util.ellipsis(subject, maxsubject)
        msg['Subject'] = mail.headencode(self.ui, subject,
                                         self.charsets, self.test)

        # try to make message have proper sender
        if not sender:
            sender = self.ui.config('email', 'from') or self.ui.username()
        if '@' not in sender or '@localhost' in sender:
            sender = self.fixmail(sender)
        msg['From'] = mail.addressencode(self.ui, sender,
                                         self.charsets, self.test)

        msg['X-Hg-Notification'] = 'changeset %s' % ctx
        if not msg['Message-Id']:
            msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
                                 (ctx, int(time.time()),
                                  hash(self.repo.root), socket.getfqdn()))
        msg['To'] = ', '.join(sorted(subs))

        msgtext = msg.as_string()
        if self.test:
            # Test mode: print the message instead of sending it.
            self.ui.write(msgtext)
            if not msgtext.endswith('\n'):
                self.ui.write('\n')
        else:
            self.ui.status(_('notify: sending %d subscribers %d changes\n') %
                           (len(subs), count))
            mail.sendmail(self.ui, util.email(msg['From']),
                          subs, msgtext, mbox=self.mbox)
403 404
    def diff(self, ctx, ref=None):
        """Write a diffstat and a (possibly truncated) diff of *ctx*
        against its first parent to the current ui buffer.

        When *ref* is given, diff up to that changeset instead of *ctx*
        itself (used to cover a whole changegroup).
        """
        maxdiff = int(self.ui.config('notify', 'maxdiff'))
        prev = ctx.p1().node()
        if ref:
            ref = ref.node()
        else:
            ref = ctx.node()
        chunks = patch.diff(self.repo, prev, ref,
                            opts=patch.diffallopts(self.ui))
        difflines = ''.join(chunks).splitlines()

        if self.ui.configbool('notify', 'diffstat'):
            s = patch.diffstat(difflines)
            # s may be nil, don't include the header if it is
            if s:
                self.ui.write(_('\ndiffstat:\n\n%s') % s)

        # maxdiff == 0 disables the diff entirely; negative means
        # include everything.
        if maxdiff == 0:
            return
        elif maxdiff > 0 and len(difflines) > maxdiff:
            msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
            self.ui.write(msg % (len(difflines), maxdiff))
            difflines = difflines[:maxdiff]
        elif difflines:
            self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))

        self.ui.write("\n".join(difflines))
432 433
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
    '''send email notifications to interested subscribers.

    if used as changegroup hook, send one email for all changesets in
    changegroup. else send one email per changeset.'''

    n = notifier(ui, repo, hooktype)
    ctx = repo[node]

    if not n.subs:
        ui.debug('notify: no subscribers to repository %s\n' % n.root)
        return
    if n.skipsource(source):
        ui.debug('notify: changes have source "%s" - skipping\n' % source)
        return

    # Formatted changesets accumulate in a ui buffer; popped into
    # *data* at the end and handed to send().
    ui.pushbuffer()
    data = ''
    count = 0
    author = ''
    if hooktype == 'changegroup' or hooktype == 'outgoing':
        # One mail covering every changeset from ctx through tip.
        start, end = ctx.rev(), len(repo)
        for rev in xrange(start, end):
            if n.node(repo[rev]):
                count += 1
                if not author:
                    author = repo[rev].user()
            else:
                # Suppressed merge: flush its buffered output so it
                # does not leak into the notification body.
                data += ui.popbuffer()
                ui.note(_('notify: suppressing notification for merge %d:%s\n')
                        % (rev, repo[rev].hex()[:12]))
                ui.pushbuffer()
        if count:
            n.diff(ctx, repo['tip'])
    else:
        # One mail for the single incoming changeset.
        if not n.node(ctx):
            ui.popbuffer()
            ui.note(_('notify: suppressing notification for merge %d:%s\n') %
                    (ctx.rev(), ctx.hex()[:12]))
            return
        count += 1
        n.diff(ctx)
        if not author:
            author = ctx.user()

    data += ui.popbuffer()
    fromauthor = ui.config('notify', 'fromauthor')
    if author and fromauthor:
        data = '\n'.join(['From: %s' % author, data])

    if count:
        n.send(ctx, count, data)
@@ -1,2900 +1,2901 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import, print_function
10 10
11 11 import collections
12 12 import copy
13 13 import difflib
14 14 import email
15 import email.parser as emailparser
15 16 import errno
16 17 import hashlib
17 18 import os
18 19 import posixpath
19 20 import re
20 21 import shutil
21 22 import tempfile
22 23 import zlib
23 24
24 25 from .i18n import _
25 26 from .node import (
26 27 hex,
27 28 short,
28 29 )
29 30 from . import (
30 31 copies,
31 32 encoding,
32 33 error,
33 34 mail,
34 35 mdiff,
35 36 pathutil,
36 37 policy,
37 38 pycompat,
38 39 scmutil,
39 40 similar,
40 41 util,
41 42 vfs as vfsmod,
42 43 )
43 44
44 45 diffhelpers = policy.importmod(r'diffhelpers')
45 46 stringio = util.stringio
46 47
47 48 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
48 49 tabsplitter = re.compile(br'(\t+|[^\t]+)')
49 50 _nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')
50 51
51 52 PatchError = error.PatchError
52 53
53 54 # public functions
54 55
55 56 def split(stream):
56 57 '''return an iterator of individual patches from a stream'''
57 58 def isheader(line, inheader):
58 59 if inheader and line[0] in (' ', '\t'):
59 60 # continuation
60 61 return True
61 62 if line[0] in (' ', '-', '+'):
62 63 # diff line - don't check for header pattern in there
63 64 return False
64 65 l = line.split(': ', 1)
65 66 return len(l) == 2 and ' ' not in l[0]
66 67
67 68 def chunk(lines):
68 69 return stringio(''.join(lines))
69 70
70 71 def hgsplit(stream, cur):
71 72 inheader = True
72 73
73 74 for line in stream:
74 75 if not line.strip():
75 76 inheader = False
76 77 if not inheader and line.startswith('# HG changeset patch'):
77 78 yield chunk(cur)
78 79 cur = []
79 80 inheader = True
80 81
81 82 cur.append(line)
82 83
83 84 if cur:
84 85 yield chunk(cur)
85 86
86 87 def mboxsplit(stream, cur):
87 88 for line in stream:
88 89 if line.startswith('From '):
89 90 for c in split(chunk(cur[1:])):
90 91 yield c
91 92 cur = []
92 93
93 94 cur.append(line)
94 95
95 96 if cur:
96 97 for c in split(chunk(cur[1:])):
97 98 yield c
98 99
99 100 def mimesplit(stream, cur):
100 101 def msgfp(m):
101 102 fp = stringio()
102 103 g = email.Generator.Generator(fp, mangle_from_=False)
103 104 g.flatten(m)
104 105 fp.seek(0)
105 106 return fp
106 107
107 108 for line in stream:
108 109 cur.append(line)
109 110 c = chunk(cur)
110 111
111 m = email.Parser.Parser().parse(c)
112 m = emailparser.Parser().parse(c)
112 113 if not m.is_multipart():
113 114 yield msgfp(m)
114 115 else:
115 116 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
116 117 for part in m.walk():
117 118 ct = part.get_content_type()
118 119 if ct not in ok_types:
119 120 continue
120 121 yield msgfp(part)
121 122
122 123 def headersplit(stream, cur):
123 124 inheader = False
124 125
125 126 for line in stream:
126 127 if not inheader and isheader(line, inheader):
127 128 yield chunk(cur)
128 129 cur = []
129 130 inheader = True
130 131 if inheader and not isheader(line, inheader):
131 132 inheader = False
132 133
133 134 cur.append(line)
134 135
135 136 if cur:
136 137 yield chunk(cur)
137 138
138 139 def remainder(cur):
139 140 yield chunk(cur)
140 141
141 142 class fiter(object):
142 143 def __init__(self, fp):
143 144 self.fp = fp
144 145
145 146 def __iter__(self):
146 147 return self
147 148
148 149 def next(self):
149 150 l = self.fp.readline()
150 151 if not l:
151 152 raise StopIteration
152 153 return l
153 154
154 155 __next__ = next
155 156
156 157 inheader = False
157 158 cur = []
158 159
159 160 mimeheaders = ['content-type']
160 161
161 162 if not util.safehasattr(stream, 'next'):
162 163 # http responses, for example, have readline but not next
163 164 stream = fiter(stream)
164 165
165 166 for line in stream:
166 167 cur.append(line)
167 168 if line.startswith('# HG changeset patch'):
168 169 return hgsplit(stream, cur)
169 170 elif line.startswith('From '):
170 171 return mboxsplit(stream, cur)
171 172 elif isheader(line, inheader):
172 173 inheader = True
173 174 if line.split(':', 1)[0].lower() in mimeheaders:
174 175 # let email parser handle this
175 176 return mimesplit(stream, cur)
176 177 elif line.startswith('--- ') and inheader:
177 178 # No evil headers seen by diff start, split by hand
178 179 return headersplit(stream, cur)
179 180 # Not enough info, keep reading
180 181
181 182 # if we are here, we have a very plain patch
182 183 return remainder(cur)
183 184
184 185 ## Some facility for extensible patch parsing:
185 186 # list of pairs ("header to match", "data key")
186 187 patchheadermap = [('Date', 'date'),
187 188 ('Branch', 'branch'),
188 189 ('Node ID', 'nodeid'),
189 190 ]
190 191
191 192 def extract(ui, fileobj):
192 193 '''extract patch from data read from fileobj.
193 194
194 195 patch can be a normal patch or contained in an email message.
195 196
196 197 return a dictionary. Standard keys are:
197 198 - filename,
198 199 - message,
199 200 - user,
200 201 - date,
201 202 - branch,
202 203 - node,
203 204 - p1,
204 205 - p2.
205 206 Any item can be missing from the dictionary. If filename is missing,
206 207 fileobj did not contain a patch. Caller must unlink filename when done.'''
207 208
208 209 # attempt to detect the start of a patch
209 210 # (this heuristic is borrowed from quilt)
210 211 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
211 212 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
212 213 br'---[ \t].*?^\+\+\+[ \t]|'
213 214 br'\*\*\*[ \t].*?^---[ \t])',
214 215 re.MULTILINE | re.DOTALL)
215 216
216 217 data = {}
217 218 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
218 219 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
219 220 try:
220 msg = email.Parser.Parser().parse(fileobj)
221 msg = emailparser.Parser().parse(fileobj)
221 222
222 223 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
223 224 data['user'] = msg['From'] and mail.headdecode(msg['From'])
224 225 if not subject and not data['user']:
225 226 # Not an email, restore parsed headers if any
226 227 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
227 228
228 229 # should try to parse msg['Date']
229 230 parents = []
230 231
231 232 if subject:
232 233 if subject.startswith('[PATCH'):
233 234 pend = subject.find(']')
234 235 if pend >= 0:
235 236 subject = subject[pend + 1:].lstrip()
236 237 subject = re.sub(br'\n[ \t]+', ' ', subject)
237 238 ui.debug('Subject: %s\n' % subject)
238 239 if data['user']:
239 240 ui.debug('From: %s\n' % data['user'])
240 241 diffs_seen = 0
241 242 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
242 243 message = ''
243 244 for part in msg.walk():
244 245 content_type = part.get_content_type()
245 246 ui.debug('Content-Type: %s\n' % content_type)
246 247 if content_type not in ok_types:
247 248 continue
248 249 payload = part.get_payload(decode=True)
249 250 m = diffre.search(payload)
250 251 if m:
251 252 hgpatch = False
252 253 hgpatchheader = False
253 254 ignoretext = False
254 255
255 256 ui.debug('found patch at byte %d\n' % m.start(0))
256 257 diffs_seen += 1
257 258 cfp = stringio()
258 259 for line in payload[:m.start(0)].splitlines():
259 260 if line.startswith('# HG changeset patch') and not hgpatch:
260 261 ui.debug('patch generated by hg export\n')
261 262 hgpatch = True
262 263 hgpatchheader = True
263 264 # drop earlier commit message content
264 265 cfp.seek(0)
265 266 cfp.truncate()
266 267 subject = None
267 268 elif hgpatchheader:
268 269 if line.startswith('# User '):
269 270 data['user'] = line[7:]
270 271 ui.debug('From: %s\n' % data['user'])
271 272 elif line.startswith("# Parent "):
272 273 parents.append(line[9:].lstrip())
273 274 elif line.startswith("# "):
274 275 for header, key in patchheadermap:
275 276 prefix = '# %s ' % header
276 277 if line.startswith(prefix):
277 278 data[key] = line[len(prefix):]
278 279 else:
279 280 hgpatchheader = False
280 281 elif line == '---':
281 282 ignoretext = True
282 283 if not hgpatchheader and not ignoretext:
283 284 cfp.write(line)
284 285 cfp.write('\n')
285 286 message = cfp.getvalue()
286 287 if tmpfp:
287 288 tmpfp.write(payload)
288 289 if not payload.endswith('\n'):
289 290 tmpfp.write('\n')
290 291 elif not diffs_seen and message and content_type == 'text/plain':
291 292 message += '\n' + payload
292 293 except: # re-raises
293 294 tmpfp.close()
294 295 os.unlink(tmpname)
295 296 raise
296 297
297 298 if subject and not message.startswith(subject):
298 299 message = '%s\n%s' % (subject, message)
299 300 data['message'] = message
300 301 tmpfp.close()
301 302 if parents:
302 303 data['p1'] = parents.pop(0)
303 304 if parents:
304 305 data['p2'] = parents.pop(0)
305 306
306 307 if diffs_seen:
307 308 data['filename'] = tmpname
308 309 else:
309 310 os.unlink(tmpname)
310 311 return data
311 312
312 313 class patchmeta(object):
313 314 """Patched file metadata
314 315
315 316 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
316 317 or COPY. 'path' is patched file path. 'oldpath' is set to the
317 318 origin file when 'op' is either COPY or RENAME, None otherwise. If
318 319 file mode is changed, 'mode' is a tuple (islink, isexec) where
319 320 'islink' is True if the file is a symlink and 'isexec' is True if
320 321 the file is executable. Otherwise, 'mode' is None.
321 322 """
322 323 def __init__(self, path):
323 324 self.path = path
324 325 self.oldpath = None
325 326 self.mode = None
326 327 self.op = 'MODIFY'
327 328 self.binary = False
328 329
329 330 def setmode(self, mode):
330 331 islink = mode & 0o20000
331 332 isexec = mode & 0o100
332 333 self.mode = (islink, isexec)
333 334
334 335 def copy(self):
335 336 other = patchmeta(self.path)
336 337 other.oldpath = self.oldpath
337 338 other.mode = self.mode
338 339 other.op = self.op
339 340 other.binary = self.binary
340 341 return other
341 342
342 343 def _ispatchinga(self, afile):
343 344 if afile == '/dev/null':
344 345 return self.op == 'ADD'
345 346 return afile == 'a/' + (self.oldpath or self.path)
346 347
347 348 def _ispatchingb(self, bfile):
348 349 if bfile == '/dev/null':
349 350 return self.op == 'DELETE'
350 351 return bfile == 'b/' + self.path
351 352
352 353 def ispatching(self, afile, bfile):
353 354 return self._ispatchinga(afile) and self._ispatchingb(bfile)
354 355
355 356 def __repr__(self):
356 357 return "<patchmeta %s %r>" % (self.op, self.path)
357 358
358 359 def readgitpatch(lr):
359 360 """extract git-style metadata about patches from <patchname>"""
360 361
361 362 # Filter patch for git information
362 363 gp = None
363 364 gitpatches = []
364 365 for line in lr:
365 366 line = line.rstrip(' \r\n')
366 367 if line.startswith('diff --git a/'):
367 368 m = gitre.match(line)
368 369 if m:
369 370 if gp:
370 371 gitpatches.append(gp)
371 372 dst = m.group(2)
372 373 gp = patchmeta(dst)
373 374 elif gp:
374 375 if line.startswith('--- '):
375 376 gitpatches.append(gp)
376 377 gp = None
377 378 continue
378 379 if line.startswith('rename from '):
379 380 gp.op = 'RENAME'
380 381 gp.oldpath = line[12:]
381 382 elif line.startswith('rename to '):
382 383 gp.path = line[10:]
383 384 elif line.startswith('copy from '):
384 385 gp.op = 'COPY'
385 386 gp.oldpath = line[10:]
386 387 elif line.startswith('copy to '):
387 388 gp.path = line[8:]
388 389 elif line.startswith('deleted file'):
389 390 gp.op = 'DELETE'
390 391 elif line.startswith('new file mode '):
391 392 gp.op = 'ADD'
392 393 gp.setmode(int(line[-6:], 8))
393 394 elif line.startswith('new mode '):
394 395 gp.setmode(int(line[-6:], 8))
395 396 elif line.startswith('GIT binary patch'):
396 397 gp.binary = True
397 398 if gp:
398 399 gitpatches.append(gp)
399 400
400 401 return gitpatches
401 402
402 403 class linereader(object):
403 404 # simple class to allow pushing lines back into the input stream
404 405 def __init__(self, fp):
405 406 self.fp = fp
406 407 self.buf = []
407 408
408 409 def push(self, line):
409 410 if line is not None:
410 411 self.buf.append(line)
411 412
412 413 def readline(self):
413 414 if self.buf:
414 415 l = self.buf[0]
415 416 del self.buf[0]
416 417 return l
417 418 return self.fp.readline()
418 419
419 420 def __iter__(self):
420 421 return iter(self.readline, '')
421 422
422 423 class abstractbackend(object):
423 424 def __init__(self, ui):
424 425 self.ui = ui
425 426
426 427 def getfile(self, fname):
427 428 """Return target file data and flags as a (data, (islink,
428 429 isexec)) tuple. Data is None if file is missing/deleted.
429 430 """
430 431 raise NotImplementedError
431 432
432 433 def setfile(self, fname, data, mode, copysource):
433 434 """Write data to target file fname and set its mode. mode is a
434 435 (islink, isexec) tuple. If data is None, the file content should
435 436 be left unchanged. If the file is modified after being copied,
436 437 copysource is set to the original file name.
437 438 """
438 439 raise NotImplementedError
439 440
440 441 def unlink(self, fname):
441 442 """Unlink target file."""
442 443 raise NotImplementedError
443 444
444 445 def writerej(self, fname, failed, total, lines):
445 446 """Write rejected lines for fname. total is the number of hunks
446 447 which failed to apply and total the total number of hunks for this
447 448 files.
448 449 """
449 450
450 451 def exists(self, fname):
451 452 raise NotImplementedError
452 453
453 454 def close(self):
454 455 raise NotImplementedError
455 456
456 457 class fsbackend(abstractbackend):
457 458 def __init__(self, ui, basedir):
458 459 super(fsbackend, self).__init__(ui)
459 460 self.opener = vfsmod.vfs(basedir)
460 461
461 462 def getfile(self, fname):
462 463 if self.opener.islink(fname):
463 464 return (self.opener.readlink(fname), (True, False))
464 465
465 466 isexec = False
466 467 try:
467 468 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
468 469 except OSError as e:
469 470 if e.errno != errno.ENOENT:
470 471 raise
471 472 try:
472 473 return (self.opener.read(fname), (False, isexec))
473 474 except IOError as e:
474 475 if e.errno != errno.ENOENT:
475 476 raise
476 477 return None, None
477 478
478 479 def setfile(self, fname, data, mode, copysource):
479 480 islink, isexec = mode
480 481 if data is None:
481 482 self.opener.setflags(fname, islink, isexec)
482 483 return
483 484 if islink:
484 485 self.opener.symlink(data, fname)
485 486 else:
486 487 self.opener.write(fname, data)
487 488 if isexec:
488 489 self.opener.setflags(fname, False, True)
489 490
490 491 def unlink(self, fname):
491 492 self.opener.unlinkpath(fname, ignoremissing=True)
492 493
493 494 def writerej(self, fname, failed, total, lines):
494 495 fname = fname + ".rej"
495 496 self.ui.warn(
496 497 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
497 498 (failed, total, fname))
498 499 fp = self.opener(fname, 'w')
499 500 fp.writelines(lines)
500 501 fp.close()
501 502
502 503 def exists(self, fname):
503 504 return self.opener.lexists(fname)
504 505
505 506 class workingbackend(fsbackend):
506 507 def __init__(self, ui, repo, similarity):
507 508 super(workingbackend, self).__init__(ui, repo.root)
508 509 self.repo = repo
509 510 self.similarity = similarity
510 511 self.removed = set()
511 512 self.changed = set()
512 513 self.copied = []
513 514
514 515 def _checkknown(self, fname):
515 516 if self.repo.dirstate[fname] == '?' and self.exists(fname):
516 517 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
517 518
518 519 def setfile(self, fname, data, mode, copysource):
519 520 self._checkknown(fname)
520 521 super(workingbackend, self).setfile(fname, data, mode, copysource)
521 522 if copysource is not None:
522 523 self.copied.append((copysource, fname))
523 524 self.changed.add(fname)
524 525
525 526 def unlink(self, fname):
526 527 self._checkknown(fname)
527 528 super(workingbackend, self).unlink(fname)
528 529 self.removed.add(fname)
529 530 self.changed.add(fname)
530 531
531 532 def close(self):
532 533 wctx = self.repo[None]
533 534 changed = set(self.changed)
534 535 for src, dst in self.copied:
535 536 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
536 537 if self.removed:
537 538 wctx.forget(sorted(self.removed))
538 539 for f in self.removed:
539 540 if f not in self.repo.dirstate:
540 541 # File was deleted and no longer belongs to the
541 542 # dirstate, it was probably marked added then
542 543 # deleted, and should not be considered by
543 544 # marktouched().
544 545 changed.discard(f)
545 546 if changed:
546 547 scmutil.marktouched(self.repo, changed, self.similarity)
547 548 return sorted(self.changed)
548 549
549 550 class filestore(object):
550 551 def __init__(self, maxsize=None):
551 552 self.opener = None
552 553 self.files = {}
553 554 self.created = 0
554 555 self.maxsize = maxsize
555 556 if self.maxsize is None:
556 557 self.maxsize = 4*(2**20)
557 558 self.size = 0
558 559 self.data = {}
559 560
560 561 def setfile(self, fname, data, mode, copied=None):
561 562 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
562 563 self.data[fname] = (data, mode, copied)
563 564 self.size += len(data)
564 565 else:
565 566 if self.opener is None:
566 567 root = tempfile.mkdtemp(prefix='hg-patch-')
567 568 self.opener = vfsmod.vfs(root)
568 569 # Avoid filename issues with these simple names
569 570 fn = str(self.created)
570 571 self.opener.write(fn, data)
571 572 self.created += 1
572 573 self.files[fname] = (fn, mode, copied)
573 574
574 575 def getfile(self, fname):
575 576 if fname in self.data:
576 577 return self.data[fname]
577 578 if not self.opener or fname not in self.files:
578 579 return None, None, None
579 580 fn, mode, copied = self.files[fname]
580 581 return self.opener.read(fn), mode, copied
581 582
582 583 def close(self):
583 584 if self.opener:
584 585 shutil.rmtree(self.opener.base)
585 586
586 587 class repobackend(abstractbackend):
587 588 def __init__(self, ui, repo, ctx, store):
588 589 super(repobackend, self).__init__(ui)
589 590 self.repo = repo
590 591 self.ctx = ctx
591 592 self.store = store
592 593 self.changed = set()
593 594 self.removed = set()
594 595 self.copied = {}
595 596
596 597 def _checkknown(self, fname):
597 598 if fname not in self.ctx:
598 599 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
599 600
600 601 def getfile(self, fname):
601 602 try:
602 603 fctx = self.ctx[fname]
603 604 except error.LookupError:
604 605 return None, None
605 606 flags = fctx.flags()
606 607 return fctx.data(), ('l' in flags, 'x' in flags)
607 608
608 609 def setfile(self, fname, data, mode, copysource):
609 610 if copysource:
610 611 self._checkknown(copysource)
611 612 if data is None:
612 613 data = self.ctx[fname].data()
613 614 self.store.setfile(fname, data, mode, copysource)
614 615 self.changed.add(fname)
615 616 if copysource:
616 617 self.copied[fname] = copysource
617 618
618 619 def unlink(self, fname):
619 620 self._checkknown(fname)
620 621 self.removed.add(fname)
621 622
622 623 def exists(self, fname):
623 624 return fname in self.ctx
624 625
625 626 def close(self):
626 627 return self.changed | self.removed
627 628
628 629 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
629 630 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
630 631 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
631 632 eolmodes = ['strict', 'crlf', 'lf', 'auto']
632 633
633 634 class patchfile(object):
634 635 def __init__(self, ui, gp, backend, store, eolmode='strict'):
635 636 self.fname = gp.path
636 637 self.eolmode = eolmode
637 638 self.eol = None
638 639 self.backend = backend
639 640 self.ui = ui
640 641 self.lines = []
641 642 self.exists = False
642 643 self.missing = True
643 644 self.mode = gp.mode
644 645 self.copysource = gp.oldpath
645 646 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
646 647 self.remove = gp.op == 'DELETE'
647 648 if self.copysource is None:
648 649 data, mode = backend.getfile(self.fname)
649 650 else:
650 651 data, mode = store.getfile(self.copysource)[:2]
651 652 if data is not None:
652 653 self.exists = self.copysource is None or backend.exists(self.fname)
653 654 self.missing = False
654 655 if data:
655 656 self.lines = mdiff.splitnewlines(data)
656 657 if self.mode is None:
657 658 self.mode = mode
658 659 if self.lines:
659 660 # Normalize line endings
660 661 if self.lines[0].endswith('\r\n'):
661 662 self.eol = '\r\n'
662 663 elif self.lines[0].endswith('\n'):
663 664 self.eol = '\n'
664 665 if eolmode != 'strict':
665 666 nlines = []
666 667 for l in self.lines:
667 668 if l.endswith('\r\n'):
668 669 l = l[:-2] + '\n'
669 670 nlines.append(l)
670 671 self.lines = nlines
671 672 else:
672 673 if self.create:
673 674 self.missing = False
674 675 if self.mode is None:
675 676 self.mode = (False, False)
676 677 if self.missing:
677 678 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
678 679 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
679 680 "current directory)\n"))
680 681
681 682 self.hash = {}
682 683 self.dirty = 0
683 684 self.offset = 0
684 685 self.skew = 0
685 686 self.rej = []
686 687 self.fileprinted = False
687 688 self.printfile(False)
688 689 self.hunks = 0
689 690
690 691 def writelines(self, fname, lines, mode):
691 692 if self.eolmode == 'auto':
692 693 eol = self.eol
693 694 elif self.eolmode == 'crlf':
694 695 eol = '\r\n'
695 696 else:
696 697 eol = '\n'
697 698
698 699 if self.eolmode != 'strict' and eol and eol != '\n':
699 700 rawlines = []
700 701 for l in lines:
701 702 if l and l[-1] == '\n':
702 703 l = l[:-1] + eol
703 704 rawlines.append(l)
704 705 lines = rawlines
705 706
706 707 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
707 708
708 709 def printfile(self, warn):
709 710 if self.fileprinted:
710 711 return
711 712 if warn or self.ui.verbose:
712 713 self.fileprinted = True
713 714 s = _("patching file %s\n") % self.fname
714 715 if warn:
715 716 self.ui.warn(s)
716 717 else:
717 718 self.ui.note(s)
718 719
719 720
720 721 def findlines(self, l, linenum):
721 722 # looks through the hash and finds candidate lines. The
722 723 # result is a list of line numbers sorted based on distance
723 724 # from linenum
724 725
725 726 cand = self.hash.get(l, [])
726 727 if len(cand) > 1:
727 728 # resort our list of potentials forward then back.
728 729 cand.sort(key=lambda x: abs(x - linenum))
729 730 return cand
730 731
731 732 def write_rej(self):
732 733 # our rejects are a little different from patch(1). This always
733 734 # creates rejects in the same form as the original patch. A file
734 735 # header is inserted so that you can run the reject through patch again
735 736 # without having to type the filename.
736 737 if not self.rej:
737 738 return
738 739 base = os.path.basename(self.fname)
739 740 lines = ["--- %s\n+++ %s\n" % (base, base)]
740 741 for x in self.rej:
741 742 for l in x.hunk:
742 743 lines.append(l)
743 744 if l[-1:] != '\n':
744 745 lines.append("\n\ No newline at end of file\n")
745 746 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
746 747
747 748 def apply(self, h):
748 749 if not h.complete():
749 750 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
750 751 (h.number, h.desc, len(h.a), h.lena, len(h.b),
751 752 h.lenb))
752 753
753 754 self.hunks += 1
754 755
755 756 if self.missing:
756 757 self.rej.append(h)
757 758 return -1
758 759
759 760 if self.exists and self.create:
760 761 if self.copysource:
761 762 self.ui.warn(_("cannot create %s: destination already "
762 763 "exists\n") % self.fname)
763 764 else:
764 765 self.ui.warn(_("file %s already exists\n") % self.fname)
765 766 self.rej.append(h)
766 767 return -1
767 768
768 769 if isinstance(h, binhunk):
769 770 if self.remove:
770 771 self.backend.unlink(self.fname)
771 772 else:
772 773 l = h.new(self.lines)
773 774 self.lines[:] = l
774 775 self.offset += len(l)
775 776 self.dirty = True
776 777 return 0
777 778
778 779 horig = h
779 780 if (self.eolmode in ('crlf', 'lf')
780 781 or self.eolmode == 'auto' and self.eol):
781 782 # If new eols are going to be normalized, then normalize
782 783 # hunk data before patching. Otherwise, preserve input
783 784 # line-endings.
784 785 h = h.getnormalized()
785 786
786 787 # fast case first, no offsets, no fuzz
787 788 old, oldstart, new, newstart = h.fuzzit(0, False)
788 789 oldstart += self.offset
789 790 orig_start = oldstart
790 791 # if there's skew we want to emit the "(offset %d lines)" even
791 792 # when the hunk cleanly applies at start + skew, so skip the
792 793 # fast case code
793 794 if (self.skew == 0 and
794 795 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
795 796 if self.remove:
796 797 self.backend.unlink(self.fname)
797 798 else:
798 799 self.lines[oldstart:oldstart + len(old)] = new
799 800 self.offset += len(new) - len(old)
800 801 self.dirty = True
801 802 return 0
802 803
803 804 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
804 805 self.hash = {}
805 806 for x, s in enumerate(self.lines):
806 807 self.hash.setdefault(s, []).append(x)
807 808
808 809 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
809 810 for toponly in [True, False]:
810 811 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
811 812 oldstart = oldstart + self.offset + self.skew
812 813 oldstart = min(oldstart, len(self.lines))
813 814 if old:
814 815 cand = self.findlines(old[0][1:], oldstart)
815 816 else:
816 817 # Only adding lines with no or fuzzed context, just
817 818 # take the skew in account
818 819 cand = [oldstart]
819 820
820 821 for l in cand:
821 822 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
822 823 self.lines[l : l + len(old)] = new
823 824 self.offset += len(new) - len(old)
824 825 self.skew = l - orig_start
825 826 self.dirty = True
826 827 offset = l - orig_start - fuzzlen
827 828 if fuzzlen:
828 829 msg = _("Hunk #%d succeeded at %d "
829 830 "with fuzz %d "
830 831 "(offset %d lines).\n")
831 832 self.printfile(True)
832 833 self.ui.warn(msg %
833 834 (h.number, l + 1, fuzzlen, offset))
834 835 else:
835 836 msg = _("Hunk #%d succeeded at %d "
836 837 "(offset %d lines).\n")
837 838 self.ui.note(msg % (h.number, l + 1, offset))
838 839 return fuzzlen
839 840 self.printfile(True)
840 841 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
841 842 self.rej.append(horig)
842 843 return -1
843 844
844 845 def close(self):
845 846 if self.dirty:
846 847 self.writelines(self.fname, self.lines, self.mode)
847 848 self.write_rej()
848 849 return len(self.rej)
849 850
850 851 class header(object):
851 852 """patch header
852 853 """
853 854 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
854 855 diff_re = re.compile('diff -r .* (.*)$')
855 856 allhunks_re = re.compile('(?:index|deleted file) ')
856 857 pretty_re = re.compile('(?:new file|deleted file) ')
857 858 special_re = re.compile('(?:index|deleted|copy|rename) ')
858 859 newfile_re = re.compile('(?:new file)')
859 860
860 861 def __init__(self, header):
861 862 self.header = header
862 863 self.hunks = []
863 864
864 865 def binary(self):
865 866 return any(h.startswith('index ') for h in self.header)
866 867
867 868 def pretty(self, fp):
868 869 for h in self.header:
869 870 if h.startswith('index '):
870 871 fp.write(_('this modifies a binary file (all or nothing)\n'))
871 872 break
872 873 if self.pretty_re.match(h):
873 874 fp.write(h)
874 875 if self.binary():
875 876 fp.write(_('this is a binary file\n'))
876 877 break
877 878 if h.startswith('---'):
878 879 fp.write(_('%d hunks, %d lines changed\n') %
879 880 (len(self.hunks),
880 881 sum([max(h.added, h.removed) for h in self.hunks])))
881 882 break
882 883 fp.write(h)
883 884
884 885 def write(self, fp):
885 886 fp.write(''.join(self.header))
886 887
887 888 def allhunks(self):
888 889 return any(self.allhunks_re.match(h) for h in self.header)
889 890
890 891 def files(self):
891 892 match = self.diffgit_re.match(self.header[0])
892 893 if match:
893 894 fromfile, tofile = match.groups()
894 895 if fromfile == tofile:
895 896 return [fromfile]
896 897 return [fromfile, tofile]
897 898 else:
898 899 return self.diff_re.match(self.header[0]).groups()
899 900
900 901 def filename(self):
901 902 return self.files()[-1]
902 903
903 904 def __repr__(self):
904 905 return '<header %s>' % (' '.join(map(repr, self.files())))
905 906
906 907 def isnewfile(self):
907 908 return any(self.newfile_re.match(h) for h in self.header)
908 909
909 910 def special(self):
910 911 # Special files are shown only at the header level and not at the hunk
911 912 # level for example a file that has been deleted is a special file.
912 913 # The user cannot change the content of the operation, in the case of
913 914 # the deleted file he has to take the deletion or not take it, he
914 915 # cannot take some of it.
915 916 # Newly added files are special if they are empty, they are not special
916 917 # if they have some content as we want to be able to change it
917 918 nocontent = len(self.header) == 2
918 919 emptynewfile = self.isnewfile() and nocontent
919 920 return emptynewfile or \
920 921 any(self.special_re.match(h) for h in self.header)
921 922
922 923 class recordhunk(object):
923 924 """patch hunk
924 925
925 926 XXX shouldn't we merge this with the other hunk class?
926 927 """
927 928
928 929 def __init__(self, header, fromline, toline, proc, before, hunk, after,
929 930 maxcontext=None):
930 931 def trimcontext(lines, reverse=False):
931 932 if maxcontext is not None:
932 933 delta = len(lines) - maxcontext
933 934 if delta > 0:
934 935 if reverse:
935 936 return delta, lines[delta:]
936 937 else:
937 938 return delta, lines[:maxcontext]
938 939 return 0, lines
939 940
940 941 self.header = header
941 942 trimedbefore, self.before = trimcontext(before, True)
942 943 self.fromline = fromline + trimedbefore
943 944 self.toline = toline + trimedbefore
944 945 _trimedafter, self.after = trimcontext(after, False)
945 946 self.proc = proc
946 947 self.hunk = hunk
947 948 self.added, self.removed = self.countchanges(self.hunk)
948 949
949 950 def __eq__(self, v):
950 951 if not isinstance(v, recordhunk):
951 952 return False
952 953
953 954 return ((v.hunk == self.hunk) and
954 955 (v.proc == self.proc) and
955 956 (self.fromline == v.fromline) and
956 957 (self.header.files() == v.header.files()))
957 958
958 959 def __hash__(self):
959 960 return hash((tuple(self.hunk),
960 961 tuple(self.header.files()),
961 962 self.fromline,
962 963 self.proc))
963 964
964 965 def countchanges(self, hunk):
965 966 """hunk -> (n+,n-)"""
966 967 add = len([h for h in hunk if h.startswith('+')])
967 968 rem = len([h for h in hunk if h.startswith('-')])
968 969 return add, rem
969 970
970 971 def reversehunk(self):
971 972 """return another recordhunk which is the reverse of the hunk
972 973
973 974 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
974 975 that, swap fromline/toline and +/- signs while keep other things
975 976 unchanged.
976 977 """
977 978 m = {'+': '-', '-': '+', '\\': '\\'}
978 979 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
979 980 return recordhunk(self.header, self.toline, self.fromline, self.proc,
980 981 self.before, hunk, self.after)
981 982
982 983 def write(self, fp):
983 984 delta = len(self.before) + len(self.after)
984 985 if self.after and self.after[-1] == '\\ No newline at end of file\n':
985 986 delta -= 1
986 987 fromlen = delta + self.removed
987 988 tolen = delta + self.added
988 989 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
989 990 (self.fromline, fromlen, self.toline, tolen,
990 991 self.proc and (' ' + self.proc)))
991 992 fp.write(''.join(self.before + self.hunk + self.after))
992 993
993 994 pretty = write
994 995
995 996 def filename(self):
996 997 return self.header.filename()
997 998
998 999 def __repr__(self):
999 1000 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1000 1001
def getmessages():
    """Build the prompt texts used by filterpatch.

    Returns a dict mapping 'multiple'/'single'/'help' to per-operation
    ('apply'/'discard'/'record') message strings.
    """
    multiple = {
        'apply': _("apply change %d/%d to '%s'?"),
        'discard': _("discard change %d/%d to '%s'?"),
        'record': _("record change %d/%d to '%s'?"),
    }
    single = {
        'apply': _("apply this change to '%s'?"),
        'discard': _("discard this change to '%s'?"),
        'record': _("record this change to '%s'?"),
    }
    helptext = {
        'apply': _('[Ynesfdaq?]'
                   '$$ &Yes, apply this change'
                   '$$ &No, skip this change'
                   '$$ &Edit this change manually'
                   '$$ &Skip remaining changes to this file'
                   '$$ Apply remaining changes to this &file'
                   '$$ &Done, skip remaining changes and files'
                   '$$ Apply &all changes to all remaining files'
                   '$$ &Quit, applying no changes'
                   '$$ &? (display help)'),
        'discard': _('[Ynesfdaq?]'
                     '$$ &Yes, discard this change'
                     '$$ &No, skip this change'
                     '$$ &Edit this change manually'
                     '$$ &Skip remaining changes to this file'
                     '$$ Discard remaining changes to this &file'
                     '$$ &Done, skip remaining changes and files'
                     '$$ Discard &all changes to all remaining files'
                     '$$ &Quit, discarding no changes'
                     '$$ &? (display help)'),
        'record': _('[Ynesfdaq?]'
                    '$$ &Yes, record this change'
                    '$$ &No, skip this change'
                    '$$ &Edit this change manually'
                    '$$ &Skip remaining changes to this file'
                    '$$ Record remaining changes to this &file'
                    '$$ &Done, skip remaining changes and files'
                    '$$ Record &all changes to all remaining files'
                    '$$ &Quit, recording no changes'
                    '$$ &? (display help)'),
    }
    return {'multiple': multiple, 'single': single, 'help': helptext}
1046 1047
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    headers is a list of header objects (as produced by parsepatch).
    operation selects the prompt wording ('apply'/'discard'/'record');
    it defaults to 'record'.  Returns a 2-tuple whose first element is
    the flat list of selected headers and hunks and whose second element
    is an (currently empty) dict.
    """
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # a previous 'file'/'all' answer short-circuits further prompting
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        # fixoffset tracks the line drift caused by hunks the user skipped,
        # so later selected hunks in the same file keep a correct toline
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            # identical header already handled; avoid prompting twice
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # binary or rename-only headers must be taken whole
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                if fixoffset:
                    # copy before mutating: the caller may reuse the chunk
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the hunk: apply the edited version instead
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # drop headers whose every hunk was refused (len(h) == 1 and not special)
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    # One hunk parsed from a unified or context diff.  self.hunk keeps the
    # raw lines (starting with the descriptor line); self.a and self.b
    # collect the old-side and new-side text, and starta/lena/startb/lenb
    # are the line ranges taken from the hunk header.
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        # lr is None when building a dummy hunk (see getnormalized)
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a '@@ -a,n +b,m @@' hunk body from linereader lr."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in the header means a one-line range
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff ('*** a,b' / '--- c,d') hunk body and
        rebuild an equivalent unified hunk in self.hunk."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old (a) side: '- ' and '! ' lines become removals, '  ' is context
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # no-eol marker: strip the trailing newline from the last line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new (b) side: '+ ' and '! ' lines become additions
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): message says 'old text' but this loop reads
                # the new block -- kept as-is to preserve output
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # merge the new-side line into self.hunk at the right position,
            # skipping over removal lines already emitted by the a side
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides contain exactly the advertised line counts
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to 'fuzz' context
        lines trimmed from the hunk edges (top only if toponly)."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1413 1414
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decompressed payload; None until _read succeeds
        self.text = None
        # True when the payload is a git binary delta, False for a literal
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        """Return the new file content; for deltas, 'lines' is the base."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse a 'GIT binary patch' body: a 'literal N'/'delta N' size
        line, then base85 lines, each prefixed with a length character
        (A-Z -> 1..26, a-z -> 27..52), ending with a short line."""
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes how many decoded bytes this line carries
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1469 1470
def parsefilename(str):
    """Extract the file name from a '--- name\\t...' / '+++ name ...' line.

    The first four characters (marker plus space) are dropped and the
    name ends at the first tab, or at the first space when there is no
    tab, or at end of line.
    """
    trimmed = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        idx = trimmed.find(sep)
        if idx >= 0:
            return trimmed[:idx]
    return trimmed
1479 1480
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    # headers pass through untouched; anything that knows how to reverse
    # itself (recordhunk) is replaced by its reverse
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1542 1543
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # record the start lines from an @@ range line
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context after change lines closes the pending hunk
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # change lines start a hunk; preceding context becomes 'before'
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush any pending hunk, then open a new header
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # state -> {event -> action}; unlisted pairs are parse errors
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    # scanpatch tokenizes the text; the transition table drives the parser
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1666 1667
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # swallow a run of consecutive separators as one component boundary
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1704 1705
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) hunk.

    Chooses which file (the ---/old or +++/new name, stripped and
    prefixed) the hunk should patch, and marks the operation as
    'ADD'/'DELETE' when the hunk creates or removes the file.
    Raises PatchError when neither name can be determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a 0,0 range against /dev/null means file creation/removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the patch-side names when nothing exists on disk
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1759 1760
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # not ours: push back for the next consumer
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0:1] == ' ':
            # context lines; '\\' covers the no-eol marker
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1805 1806
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # fp does not support tell/seek (e.g. a pipe): buffer the rest
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller (iterhunks) can re-read the same data
    fp.seek(pos)
    return gitpatches
1831 1832
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = unknown, True = context diff, False = unified diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a file: announce the file it belongs to
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                # reversed so ispatching can pop from the end in file order
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunkless git patches (renames, mode changes, ...) that
            # precede the current file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining hunkless git patches
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1927 1928
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk starts with two little-endian base-128 varints (source and
    result sizes), followed by a stream of opcodes: a byte with the high
    bit set is a copy-from-source command (low bits select which
    offset/size bytes follow), a non-zero byte without the high bit is a
    count of literal bytes to insert, and zero is invalid.

    Raises PatchError on a zero opcode.
    """
    def deltahead(binchunk):
        # length in bytes of the leading varint: everything up to and
        # including the first byte without the 0x80 continuation bit
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # accumulate pieces and join once: repeated 'out +=' is quadratic
    pieces = []
    s = deltahead(binchunk)
    binchunk = binchunk[s:]     # skip source size
    s = deltahead(binchunk)
    binchunk = binchunk[s:]     # skip result size
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy command: bits 0-3 select offset bytes present,
            # bits 4-6 select size bytes present (little-endian)
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # size 0 encodes the maximum copy size (per patch-delta.c)
                size = 0x10000
            offset_end = offset + size
            pieces.append(data[offset:offset_end])
        elif cmd != 0:
            # insert command: cmd literal bytes follow
            offset_end = i + cmd
            pieces.append(binchunk[i:offset_end])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(pieces)
1983 1984
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and attempt to apply it.

    The return value is 0 for a clean application, -1 when any hunk was
    rejected and 1 when the patch applied but with fuzz.

    With eolmode='strict' both the patch content and the patched files
    are handled in binary mode; any other mode ignores line endings
    while matching hunks and then normalizes them according to eolmode.
    """
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, prefix=prefix, eolmode=eolmode)
1996 1997
def _canonprefix(repo, prefix):
    """Return prefix canonicalized against repo's root and cwd.

    A non-empty canonical prefix gains a trailing '/'; an empty prefix
    is returned unchanged.
    """
    if not prefix:
        return prefix
    canonical = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
    if canonical != '':
        canonical += '/'
    return canonical
2003 2004
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Drive the hunk parser over fp and apply each file via patcher.

    Returns 0 on a clean apply, -1 when any rejects were recorded and 1
    when some hunk applied only with a nonzero return from apply()
    (see applydiff()).  'store' holds pre-patch contents of copy/rename
    sources so later hunks can read them.
    """
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # strip path components and re-prefix, keeping only the path part
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            # hunks with no open file are silently dropped (the file
            # event that should have opened one failed earlier)
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finish the previous file before starting a new one
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: synthesize metadata from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change: no content hunks follow
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # a file that cannot be opened counts as one reject;
                # its hunks will then be skipped above
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash originals of copy/rename sources before any hunk
            # can modify them
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2088 2089
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        # make the external tool operate from the repository root
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # scan the tool's stdout for progress/failure messages;
        # presumably this matches GNU patch's output format — other
        # patch implementations may word things differently
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                # NOTE(review): 'pf'/'printed_file' are unbound if a fuzz
                # line ever precedes the first 'patching file' line
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            # record adds/removes/renames for the touched files
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2130 2131
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj through the given backend.

    patchobj may be a file name or an already-open file object.  files,
    when supplied, collects the names of the files the backend touched.
    Returns True when the patch applied with fuzz, False on a clean
    apply; raises PatchError when any hunk was rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    normalized = eolmode.lower()
    if normalized not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = normalized

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a file name: assume a file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2157 2158
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    wbackend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wbackend, patchobj, strip, prefix, files, eolmode)
2164 2165
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply patchobj via a repobackend built from ctx and store.

    See patchbackend() for the return value.
    """
    rbackend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, rbackend, patchobj, strip, prefix, files, eolmode)
2169 2170
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    An external patcher program (ui.patch) ignores 'eolmode'.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if not patcher:
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, patcher, patchname, strip,
                          files, similarity)
2191 2192
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of file paths touched by the patch at patchpath.

    Paths are stripped/prefixed the same way apply would do it; for a
    rename both the new and the old path are included.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    # non-git patch: derive metadata from the file names
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2214 2215
class GitDiffRequired(Exception):
    """Signals that a diff can only be expressed in git format.

    Raised (and caught) inside diffhunks() to upgrade a plain diff to a
    git-style diff when data would otherwise be lost.
    """
    pass
2217 2218
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    """Return diffopts with every feature group enabled and parsed."""
    allfeatures = {'git': True, 'whitespace': True, 'formatchanging': True}
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, **allfeatures)

# historical alias kept for callers that use the old name
diffopts = diffallopts
2224 2225
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # command-line value wins over config; forceplain overrides the
        # config lookup when HGPLAIN is in effect
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }
    buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    # sha1 hex digests are at most 40 characters
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2306 2307
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # cmdutil.getloglinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten the (hunkrange, hunklines) pairs into one text block
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # a lone 'diff ...' line with no content is suppressed
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2353 2354
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU (max ~20 entries) of filelogs, keyed by file name,
        # so repeated filectx lookups don't reopen the same filelog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # some change needs git format; regenerate everything as git
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2466 2467
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # word-level highlighting is only attempted when the caller passed
    # diff opts with worddiff enabled
    inlinecolor = False
    if kw.get(r'opts'):
        inlinecolor = kw[r'opts'].worddiff
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        matches = {}
        if inlinecolor:
            # map deleted-line indexes to their matching inserted lines
            matches = _findmatches(lines)
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            # track whether we are inside a file header: header mode ends
            # at the first hunk ('@') line and starts on any line that is
            # not hunk content
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        if i in matches:
                            # word-level diff against the paired line
                            for t, l in _inlinediff(lines[i].rstrip(),
                                                    lines[matches[i]].rstrip(),
                                                    label):
                                yield (t, l)
                        else:
                            # label tabs separately so they can be styled
                            for token in tabsplitter.findall(stripline):
                                if '\t' == token[0]:
                                    yield (token, 'diff.tab')
                                else:
                                    yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                # emit the stripped trailing whitespace with its own label
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2532 2533
def _findmatches(slist):
    '''Pair up deleted lines with similar inserted lines.

    Returns a dict mapping the index of each '-' line to the index of
    the '+' line chosen as its counterpart, and vice versa.
    '''
    matches = {}
    lastmatch = 0
    for idx, line in enumerate(slist):
        if not line:
            continue
        if line[0] != '-':
            continue
        lastmatch = max(lastmatch, idx)
        ingroup = False
        for offset, candidate in enumerate(slist[lastmatch + 1:]):
            if candidate == '':
                continue
            if candidate[0] == '-' and ingroup:
                # reached the next deletion group without a match
                break
            if candidate[0] == '+':
                # potential counterpart: accept it when similar enough
                ingroup = True
                ratio = difflib.SequenceMatcher(None, line,
                                                candidate).ratio()
                if ratio > 0.7:
                    lastmatch = lastmatch + 1 + offset
                    matches[idx] = lastmatch
                    matches[lastmatch] = idx
                    break
    return matches
2559 2560
def _inlinediff(s1, s2, operation):
    '''Perform string diff to highlight specific changes.

    Returns a list of (token, label) pairs for the changed line; tokens
    that differ between the two lines get the '<operation>.highlight'
    label.
    '''
    # ndiff marks lines from the second sequence with '+' and from the
    # first with '-'; skip the side we are not rendering
    operation_skip = '+?' if operation == 'diff.deleted' else '-?'
    if operation == 'diff.deleted':
        s2, s1 = s1, s2

    buff = []
    # we never want to highlight the leading +-
    if operation == 'diff.deleted' and s2.startswith('-'):
        label = operation
        token = '-'
        s2 = s2[1:]
        s1 = s1[1:]
    elif operation == 'diff.inserted' and s1.startswith('+'):
        label = operation
        token = '+'
        s2 = s2[1:]
        s1 = s1[1:]
    else:
        raise error.ProgrammingError("Case not expected, operation = %s" %
                                     operation)

    # diff at word granularity, using the module-level _nonwordre split
    s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
    for part in s:
        if part[0] in operation_skip or len(part) == 2:
            continue
        l = operation + '.highlight'
        if part[0] in ' ':
            l = operation
        if part[2:] == '\t':
            l = 'diff.tab'
        if l == label: # contiguous token with same label
            token += part[2:]
            continue
        else:
            buff.append((token, label))
            label = l
            token = part[2:]
    buff.append((token, label))

    return buff
2601 2602
def diffui(*args, **kw):
    """Wrap diff() with difflabel(), yielding (output, label) pairs
    suitable for ui.write()."""
    return difflabel(diff, *args, **kw)
2605 2606
def _filepairs(modified, added, removed, copy, opts):
    '''generates tuples (f1, f2, copyop), where f1 is the name of the file
    before and f2 is the the name after. For added files, f1 will be None,
    and for removed files, f2 will be None. copyop may be set to None, 'copy'
    or 'rename' (the latter two only if opts.git is set).'''
    renamedaway = set()
    copyto = {src: dst for dst, src in copy.items()}
    addedset = set(added)
    removedset = set(removed)

    for f in sorted(modified + added + removed):
        copyop = None
        f1 = f
        f2 = f
        if f in addedset:
            f1 = None
            if f in copy and opts.git:
                f1 = copy[f]
                if f1 in removedset and f1 not in renamedaway:
                    # first time the removed source is consumed: a rename
                    copyop = 'rename'
                    renamedaway.add(f1)
                else:
                    copyop = 'copy'
        elif f in removedset:
            f2 = None
            if (opts.git and f in copyto and copyto[f] in addedset
                and copy[copyto[f]] == f):
                # already reported above as the source of a rename/copy
                continue
        yield f1, f2, copyop
2638 2639
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git-compatible blob hash: sha1 over "blob <len>\0<content>"
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return hex(s.digest())

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) header line: "diff -r REV1 -r REV2 path"
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        # developer sanity check: every incoming path must live under relroot
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        binary = not opts.text and any(f.isbinary()
                                       for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            # anything a plain diff cannot express must be reported
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
2794 2795
def diffstatsum(stats):
    """Reduce per-file diffstat tuples to file-set totals.

    *stats* yields (filename, adds, removes, isbinary) tuples, as
    produced by diffstatdata(). Returns a 5-tuple:
    (widest filename display width, largest per-file change count,
    total additions, total removals, whether any file was binary).
    """
    widest, biggest = 0, 0
    added, removed = 0, 0
    anybinary = False
    for fname, adds, removes, isbinary in stats:
        # colwidth accounts for wide (east-asian) characters
        widest = max(widest, encoding.colwidth(fname))
        biggest = max(biggest, adds + removes)
        added += adds
        removed += removes
        anybinary = anybinary or isbinary

    return widest, biggest, added, removed, anybinary
2805 2806
def diffstatdata(lines):
    """Parse unified diff output into per-file statistics.

    *lines* is an iterable of diff output lines. Returns a list of
    (filename, adds, removes, isbinary) tuples, one per file touched
    by the diff.
    """
    # Raw string: '\s' in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning on Python 3.6+, an error later).
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the counters accumulated for the current file, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            # first hunk marker ends the header
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2844 2845
def diffstat(lines, width=80):
    """Format diff output *lines* as a classic diffstat summary.

    Returns a single string: one row per file with a +/- histogram
    scaled to fit *width* columns, followed by a totals line.
    """
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # the count column must also fit the literal 'Bin' marker
    countwidth = max(len(str(maxtotal)), 3 if hasbinary else 0)
    # leave room for the histogram, but never less than 10 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(count):
        if maxtotal <= graphwidth:
            return count
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(count * graphwidth // maxtotal, int(bool(count)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2882 2883
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        if row and row[-1] in '+-':
            # histogram row: split off the +/- graph and label its runs
            name, graph = row.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (row, '')
        # splitlines() strips the newline; re-emit it unlabeled
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now