##// END OF EJS Templates
patch: extract fs access from patchfile into fsbackend...
Patrick Mezard -
r14348:c1c71910 default
parent child Browse files
Show More
@@ -1,691 +1,691 b''
1 # keyword.py - $Keyword$ expansion for Mercurial
1 # keyword.py - $Keyword$ expansion for Mercurial
2 #
2 #
3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # $Id$
8 # $Id$
9 #
9 #
10 # Keyword expansion hack against the grain of a DSCM
10 # Keyword expansion hack against the grain of a DSCM
11 #
11 #
12 # There are many good reasons why this is not needed in a distributed
12 # There are many good reasons why this is not needed in a distributed
13 # SCM, still it may be useful in very small projects based on single
13 # SCM, still it may be useful in very small projects based on single
14 # files (like LaTeX packages), that are mostly addressed to an
14 # files (like LaTeX packages), that are mostly addressed to an
15 # audience not running a version control system.
15 # audience not running a version control system.
16 #
16 #
17 # For in-depth discussion refer to
17 # For in-depth discussion refer to
18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 #
19 #
20 # Keyword expansion is based on Mercurial's changeset template mappings.
20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 #
21 #
22 # Binary files are not touched.
22 # Binary files are not touched.
23 #
23 #
24 # Files to act upon/ignore are specified in the [keyword] section.
24 # Files to act upon/ignore are specified in the [keyword] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
26 #
26 #
27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28
28
29 '''expand keywords in tracked files
29 '''expand keywords in tracked files
30
30
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 tracked text files selected by your configuration.
32 tracked text files selected by your configuration.
33
33
34 Keywords are only expanded in local repositories and not stored in the
34 Keywords are only expanded in local repositories and not stored in the
35 change history. The mechanism can be regarded as a convenience for the
35 change history. The mechanism can be regarded as a convenience for the
36 current user or for archive distribution.
36 current user or for archive distribution.
37
37
38 Keywords expand to the changeset data pertaining to the latest change
38 Keywords expand to the changeset data pertaining to the latest change
39 relative to the working directory parent of each file.
39 relative to the working directory parent of each file.
40
40
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 sections of hgrc files.
42 sections of hgrc files.
43
43
44 Example::
44 Example::
45
45
46 [keyword]
46 [keyword]
47 # expand keywords in every python file except those matching "x*"
47 # expand keywords in every python file except those matching "x*"
48 **.py =
48 **.py =
49 x* = ignore
49 x* = ignore
50
50
51 [keywordset]
51 [keywordset]
52 # prefer svn- over cvs-like default keywordmaps
52 # prefer svn- over cvs-like default keywordmaps
53 svn = True
53 svn = True
54
54
55 .. note::
55 .. note::
56 The more specific you are in your filename patterns the less you
56 The more specific you are in your filename patterns the less you
57 lose speed in huge repositories.
57 lose speed in huge repositories.
58
58
59 For [keywordmaps] template mapping and expansion demonstration and
59 For [keywordmaps] template mapping and expansion demonstration and
60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 available templates and filters.
61 available templates and filters.
62
62
63 Three additional date template filters are provided:
63 Three additional date template filters are provided:
64
64
65 :``utcdate``: "2006/09/18 15:13:13"
65 :``utcdate``: "2006/09/18 15:13:13"
66 :``svnutcdate``: "2006-09-18 15:13:13Z"
66 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
68
68
69 The default template mappings (view with :hg:`kwdemo -d`) can be
69 The default template mappings (view with :hg:`kwdemo -d`) can be
70 replaced with customized keywords and templates. Again, run
70 replaced with customized keywords and templates. Again, run
71 :hg:`kwdemo` to control the results of your configuration changes.
71 :hg:`kwdemo` to control the results of your configuration changes.
72
72
73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 to avoid storing expanded keywords in the change history.
74 to avoid storing expanded keywords in the change history.
75
75
76 To force expansion after enabling it, or a configuration change, run
76 To force expansion after enabling it, or a configuration change, run
77 :hg:`kwexpand`.
77 :hg:`kwexpand`.
78
78
79 Expansions spanning more than one line and incremental expansions,
79 Expansions spanning more than one line and incremental expansions,
80 like CVS' $Log$, are not supported. A keyword template map "Log =
80 like CVS' $Log$, are not supported. A keyword template map "Log =
81 {desc}" expands to the first line of the changeset description.
81 {desc}" expands to the first line of the changeset description.
82 '''
82 '''
83
83
84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
85 from mercurial import localrepo, match, patch, templatefilters, templater, util
85 from mercurial import localrepo, match, patch, templatefilters, templater, util
86 from mercurial import scmutil
86 from mercurial import scmutil
87 from mercurial.hgweb import webcommands
87 from mercurial.hgweb import webcommands
88 from mercurial.i18n import _
88 from mercurial.i18n import _
89 import os, re, shutil, tempfile
89 import os, re, shutil, tempfile
90
90
# 'kwdemo' must be runnable outside a repository (it builds its own
# temporary repo), so register it as an optional-repo command.
commands.optionalrepo += ' kwdemo'

cmdtable = {}
command = cmdutil.command(cmdtable)

# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
                ' outgoing push tip verify convert email glog')

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'

# names of extensions using dorecord
recordextensions = 'record'

# color effects used by the kwfiles command output (picked up by the
# color extension when enabled)
colortable = {
    'kwfiles.enabled': 'green bold',
    'kwfiles.deleted': 'cyan bold underline',
    'kwfiles.enabledunknown': 'green',
    'kwfiles.ignored': 'bold',
    'kwfiles.ignoredunknown': 'none'
}
114
114
# date like in cvs' $Date
def utcdate(text):
    ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
    '''
    dateformat = '%Y/%m/%d %H:%M:%S'
    # zero the timezone offset so the timestamp is rendered in UTC
    return util.datestr((text[0], 0), dateformat)
# date like in svn's $Date
def svnisodate(text):
    ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
    +0200 (Tue, 18 Aug 2009)".
    '''
    # keep the original timezone offset (%1%2), as svn does
    dateformat = '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)'
    return util.datestr(text, dateformat)
# date like in svn's $Id
def svnutcdate(text):
    ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
    11:00:13Z".
    '''
    dateformat = '%Y-%m-%d %H:%M:%SZ'
    # zero the timezone offset so the timestamp is rendered in UTC
    return util.datestr((text[0], 0), dateformat)
132
132
# register the three date filters so keyword templates can use them
templatefilters.filters.update({'utcdate': utcdate,
                                'svnisodate': svnisodate,
                                'svnutcdate': svnutcdate})

# make keyword tools accessible
# 'templater' holds the active kwtemplater instance (set during reposetup),
# 'hgcmd' the name of the hg command currently being dispatched
kwtools = {'templater': None, 'hgcmd': ''}
139
139
140 def _defaultkwmaps(ui):
140 def _defaultkwmaps(ui):
141 '''Returns default keywordmaps according to keywordset configuration.'''
141 '''Returns default keywordmaps according to keywordset configuration.'''
142 templates = {
142 templates = {
143 'Revision': '{node|short}',
143 'Revision': '{node|short}',
144 'Author': '{author|user}',
144 'Author': '{author|user}',
145 }
145 }
146 kwsets = ({
146 kwsets = ({
147 'Date': '{date|utcdate}',
147 'Date': '{date|utcdate}',
148 'RCSfile': '{file|basename},v',
148 'RCSfile': '{file|basename},v',
149 'RCSFile': '{file|basename},v', # kept for backwards compatibility
149 'RCSFile': '{file|basename},v', # kept for backwards compatibility
150 # with hg-keyword
150 # with hg-keyword
151 'Source': '{root}/{file},v',
151 'Source': '{root}/{file},v',
152 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
152 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
153 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
153 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
154 }, {
154 }, {
155 'Date': '{date|svnisodate}',
155 'Date': '{date|svnisodate}',
156 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
156 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
157 'LastChangedRevision': '{node|short}',
157 'LastChangedRevision': '{node|short}',
158 'LastChangedBy': '{author|user}',
158 'LastChangedBy': '{author|user}',
159 'LastChangedDate': '{date|svnisodate}',
159 'LastChangedDate': '{date|svnisodate}',
160 })
160 })
161 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
161 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
162 return templates
162 return templates
163
163
164 def _shrinktext(text, subfunc):
164 def _shrinktext(text, subfunc):
165 '''Helper for keyword expansion removal in text.
165 '''Helper for keyword expansion removal in text.
166 Depending on subfunc also returns number of substitutions.'''
166 Depending on subfunc also returns number of substitutions.'''
167 return subfunc(r'$\1$', text)
167 return subfunc(r'$\1$', text)
168
168
169 def _preselect(wstatus, changed):
169 def _preselect(wstatus, changed):
170 '''Retrieves modfied and added files from a working directory state
170 '''Retrieves modfied and added files from a working directory state
171 and returns the subset of each contained in given changed files
171 and returns the subset of each contained in given changed files
172 retrieved from a change context.'''
172 retrieved from a change context.'''
173 modified, added = wstatus[:2]
173 modified, added = wstatus[:2]
174 modified = [f for f in modified if f in changed]
174 modified = [f for f in modified if f in changed]
175 added = [f for f in added if f in changed]
175 added = [f for f in added if f in changed]
176 return modified, added
176 return modified, added
177
177
178
178
class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        self.ui = ui
        self.repo = repo
        # matcher built from the [keyword] include/exclude patterns
        self.match = match.match(repo.root, '', [], inc, exc)
        # restricted commands expand only when writing to the working
        # dir and unexpand when reading from it (see 'restricted' above)
        self.restrict = kwtools['hgcmd'] in restricted.split()
        # flipped on by the record extension wrapper while recording
        self.record = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict((k, templater.parsestring(v, False))
                                  for k, v in kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(re.escape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        return re.compile(r'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)

    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.'''
        def kwsub(mobj):
            kw = mobj.group(1)
            ct = cmdutil.changeset_templater(self.ui, self.repo,
                                             False, None, '', False)
            ct.use_template(self.templates[kw])
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            # keywords must stay on a single line; keep only the first
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        # never expand for restricted commands, unmatched files or
        # binary content
        if not self.restrict and self.match(path) and not util.binary(data):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion and are not symbolic links.'''
        return [f for f in cand if self.match(f) and not 'l' in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.record: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        if self.restrict or expand and lookup:
            # manifest only needed when filelog data must be read
            mf = ctx.manifest()
        lctx = ctx
        # restricted/rekw work on unexpanded keywords, otherwise on
        # already-expanded ones
        re_kw = (self.restrict or rekw) and self.rekw or self.rekwexp
        msg = (expand and _('overwriting %s expanding keywords\n')
               or _('overwriting %s shrinking keywords\n'))
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if util.binary(data):
                continue
            if expand:
                if lookup:
                    lctx = self.linkctx(f, mf[f])
                data, found = self.substitute(data, f, lctx, re_kw.subn)
            elif self.restrict:
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                self.repo.wwrite(f, data, ctx.flags(f))
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.record:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not util.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        return self.restrict and self.shrink(fname, data) or data
297
297
class kwfilelog(filelog.filelog):
    '''
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        # the active kwtemplater shared via reposetup
        self.kwt = kwt
        self.path = path

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        data = super(kwfilelog, self).read(node)
        # renamed entries carry copy metadata; leave them untouched
        if self.renamed(node):
            return data
        return self.kwt.expand(self.path, node, data)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        # shrink first so stored (unexpanded) and working copy text agree
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, text)
324
324
def _status(ui, repo, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if not kwt:
        # no active templater: [keyword] is either unset or matches nothing
        if ui.configitems('keyword'):
            raise util.Abort(_('[keyword] patterns cannot match'))
        raise util.Abort(_('no [keyword] patterns configured'))
    showunknown = opts.get('unknown') or opts.get('all')
    return repo.status(match=scmutil.match(repo, pats, opts),
                       clean=True, unknown=showunknown)
334
334
def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.'''
    wctx = repo[None]
    # refuse to run on an uncommitted merge (two working dir parents)
    if len(wctx.parents()) > 1:
        raise util.Abort(_('outstanding uncommitted merge'))
    kwt = kwtools['templater']
    wlock = repo.wlock()
    try:
        wstatus = _status(ui, repo, kwt, *pats, **opts)
        modified, added, removed, deleted = wstatus[:4]
        clean = wstatus[6]
        # only clean files can be overwritten without losing local edits
        if modified or added or removed or deleted:
            raise util.Abort(_('outstanding uncommitted changes'))
        kwt.overwrite(wctx, clean, True, expand)
    finally:
        wlock.release()
350
350
@command('kwdemo',
         [('d', 'default', None, _('show default keyword template maps')),
          ('f', 'rcfile', '',
           _('read maps from rcfile'), _('FILE'))],
         _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        # print one configuration section in hgrc syntax
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    # the demo runs in a throwaway repository so the user's real repo
    # and dirstate are never touched
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(ui, tmpdir, True)
    ui.setconfig('keyword', fn, '')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn)

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.opener('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            # overwrite the user's maps with the defaults for this run
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v)
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        kwmaps = dict(uikwmaps) or _defaultkwmaps(ui)

    # activate the extension machinery inside the demo repository
    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    # one unexpanded '$Keyword$' per line, ready for expansion on commit
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    repo.dirstate.setbranch('demobranch')
    # disable commit hooks so the demo commit cannot be intercepted
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '')
    msg = _('hg keyword configuration and expansion example')
    ui.note("hg ci -m '%s'\n" % msg)
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    shutil.rmtree(tmpdir, ignore_errors=True)
438
438
439 @command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
439 @command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
440 def expand(ui, repo, *pats, **opts):
440 def expand(ui, repo, *pats, **opts):
441 '''expand keywords in the working directory
441 '''expand keywords in the working directory
442
442
443 Run after (re)enabling keyword expansion.
443 Run after (re)enabling keyword expansion.
444
444
445 kwexpand refuses to run if given files contain local changes.
445 kwexpand refuses to run if given files contain local changes.
446 '''
446 '''
447 # 3rd argument sets expansion to True
447 # 3rd argument sets expansion to True
448 _kwfwrite(ui, repo, True, *pats, **opts)
448 _kwfwrite(ui, repo, True, *pats, **opts)
449
449
450 @command('kwfiles',
450 @command('kwfiles',
451 [('A', 'all', None, _('show keyword status flags of all files')),
451 [('A', 'all', None, _('show keyword status flags of all files')),
452 ('i', 'ignore', None, _('show files excluded from expansion')),
452 ('i', 'ignore', None, _('show files excluded from expansion')),
453 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
453 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
454 ] + commands.walkopts,
454 ] + commands.walkopts,
455 _('hg kwfiles [OPTION]... [FILE]...'))
455 _('hg kwfiles [OPTION]... [FILE]...'))
456 def files(ui, repo, *pats, **opts):
456 def files(ui, repo, *pats, **opts):
457 '''show files configured for keyword expansion
457 '''show files configured for keyword expansion
458
458
459 List which files in the working directory are matched by the
459 List which files in the working directory are matched by the
460 [keyword] configuration patterns.
460 [keyword] configuration patterns.
461
461
462 Useful to prevent inadvertent keyword expansion and to speed up
462 Useful to prevent inadvertent keyword expansion and to speed up
463 execution by including only files that are actual candidates for
463 execution by including only files that are actual candidates for
464 expansion.
464 expansion.
465
465
466 See :hg:`help keyword` on how to construct patterns both for
466 See :hg:`help keyword` on how to construct patterns both for
467 inclusion and exclusion of files.
467 inclusion and exclusion of files.
468
468
469 With -A/--all and -v/--verbose the codes used to show the status
469 With -A/--all and -v/--verbose the codes used to show the status
470 of files are::
470 of files are::
471
471
472 K = keyword expansion candidate
472 K = keyword expansion candidate
473 k = keyword expansion candidate (not tracked)
473 k = keyword expansion candidate (not tracked)
474 I = ignored
474 I = ignored
475 i = ignored (not tracked)
475 i = ignored (not tracked)
476 '''
476 '''
477 kwt = kwtools['templater']
477 kwt = kwtools['templater']
478 status = _status(ui, repo, kwt, *pats, **opts)
478 status = _status(ui, repo, kwt, *pats, **opts)
479 cwd = pats and repo.getcwd() or ''
479 cwd = pats and repo.getcwd() or ''
480 modified, added, removed, deleted, unknown, ignored, clean = status
480 modified, added, removed, deleted, unknown, ignored, clean = status
481 files = []
481 files = []
482 if not opts.get('unknown') or opts.get('all'):
482 if not opts.get('unknown') or opts.get('all'):
483 files = sorted(modified + added + clean)
483 files = sorted(modified + added + clean)
484 wctx = repo[None]
484 wctx = repo[None]
485 kwfiles = kwt.iskwfile(files, wctx)
485 kwfiles = kwt.iskwfile(files, wctx)
486 kwdeleted = kwt.iskwfile(deleted, wctx)
486 kwdeleted = kwt.iskwfile(deleted, wctx)
487 kwunknown = kwt.iskwfile(unknown, wctx)
487 kwunknown = kwt.iskwfile(unknown, wctx)
488 if not opts.get('ignore') or opts.get('all'):
488 if not opts.get('ignore') or opts.get('all'):
489 showfiles = kwfiles, kwdeleted, kwunknown
489 showfiles = kwfiles, kwdeleted, kwunknown
490 else:
490 else:
491 showfiles = [], [], []
491 showfiles = [], [], []
492 if opts.get('all') or opts.get('ignore'):
492 if opts.get('all') or opts.get('ignore'):
493 showfiles += ([f for f in files if f not in kwfiles],
493 showfiles += ([f for f in files if f not in kwfiles],
494 [f for f in unknown if f not in kwunknown])
494 [f for f in unknown if f not in kwunknown])
495 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
495 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
496 kwstates = zip('K!kIi', showfiles, kwlabels)
496 kwstates = zip('K!kIi', showfiles, kwlabels)
497 for char, filenames, kwstate in kwstates:
497 for char, filenames, kwstate in kwstates:
498 fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
498 fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
499 for f in filenames:
499 for f in filenames:
500 ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
500 ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
501
501
502 @command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
502 @command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
503 def shrink(ui, repo, *pats, **opts):
503 def shrink(ui, repo, *pats, **opts):
504 '''revert expanded keywords in the working directory
504 '''revert expanded keywords in the working directory
505
505
506 Must be run before changing/disabling active keywords.
506 Must be run before changing/disabling active keywords.
507
507
508 kwshrink refuses to run if given files contain local changes.
508 kwshrink refuses to run if given files contain local changes.
509 '''
509 '''
510 # 3rd argument sets expansion to False
510 # 3rd argument sets expansion to False
511 _kwfwrite(ui, repo, False, *pats, **opts)
511 _kwfwrite(ui, repo, False, *pats, **opts)
512
512
513
513
514 def uisetup(ui):
514 def uisetup(ui):
515 ''' Monkeypatches dispatch._parse to retrieve user command.'''
515 ''' Monkeypatches dispatch._parse to retrieve user command.'''
516
516
517 def kwdispatch_parse(orig, ui, args):
517 def kwdispatch_parse(orig, ui, args):
518 '''Monkeypatch dispatch._parse to obtain running hg command.'''
518 '''Monkeypatch dispatch._parse to obtain running hg command.'''
519 cmd, func, args, options, cmdoptions = orig(ui, args)
519 cmd, func, args, options, cmdoptions = orig(ui, args)
520 kwtools['hgcmd'] = cmd
520 kwtools['hgcmd'] = cmd
521 return cmd, func, args, options, cmdoptions
521 return cmd, func, args, options, cmdoptions
522
522
523 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
523 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
524
524
525 def reposetup(ui, repo):
525 def reposetup(ui, repo):
526 '''Sets up repo as kwrepo for keyword substitution.
526 '''Sets up repo as kwrepo for keyword substitution.
527 Overrides file method to return kwfilelog instead of filelog
527 Overrides file method to return kwfilelog instead of filelog
528 if file matches user configuration.
528 if file matches user configuration.
529 Wraps commit to overwrite configured files with updated
529 Wraps commit to overwrite configured files with updated
530 keyword substitutions.
530 keyword substitutions.
531 Monkeypatches patch and webcommands.'''
531 Monkeypatches patch and webcommands.'''
532
532
533 try:
533 try:
534 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
534 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
535 or '.hg' in util.splitpath(repo.root)
535 or '.hg' in util.splitpath(repo.root)
536 or repo._url.startswith('bundle:')):
536 or repo._url.startswith('bundle:')):
537 return
537 return
538 except AttributeError:
538 except AttributeError:
539 pass
539 pass
540
540
541 inc, exc = [], ['.hg*']
541 inc, exc = [], ['.hg*']
542 for pat, opt in ui.configitems('keyword'):
542 for pat, opt in ui.configitems('keyword'):
543 if opt != 'ignore':
543 if opt != 'ignore':
544 inc.append(pat)
544 inc.append(pat)
545 else:
545 else:
546 exc.append(pat)
546 exc.append(pat)
547 if not inc:
547 if not inc:
548 return
548 return
549
549
550 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
550 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
551
551
552 class kwrepo(repo.__class__):
552 class kwrepo(repo.__class__):
553 def file(self, f):
553 def file(self, f):
554 if f[0] == '/':
554 if f[0] == '/':
555 f = f[1:]
555 f = f[1:]
556 return kwfilelog(self.sopener, kwt, f)
556 return kwfilelog(self.sopener, kwt, f)
557
557
558 def wread(self, filename):
558 def wread(self, filename):
559 data = super(kwrepo, self).wread(filename)
559 data = super(kwrepo, self).wread(filename)
560 return kwt.wread(filename, data)
560 return kwt.wread(filename, data)
561
561
562 def commit(self, *args, **opts):
562 def commit(self, *args, **opts):
563 # use custom commitctx for user commands
563 # use custom commitctx for user commands
564 # other extensions can still wrap repo.commitctx directly
564 # other extensions can still wrap repo.commitctx directly
565 self.commitctx = self.kwcommitctx
565 self.commitctx = self.kwcommitctx
566 try:
566 try:
567 return super(kwrepo, self).commit(*args, **opts)
567 return super(kwrepo, self).commit(*args, **opts)
568 finally:
568 finally:
569 del self.commitctx
569 del self.commitctx
570
570
571 def kwcommitctx(self, ctx, error=False):
571 def kwcommitctx(self, ctx, error=False):
572 n = super(kwrepo, self).commitctx(ctx, error)
572 n = super(kwrepo, self).commitctx(ctx, error)
573 # no lock needed, only called from repo.commit() which already locks
573 # no lock needed, only called from repo.commit() which already locks
574 if not kwt.record:
574 if not kwt.record:
575 restrict = kwt.restrict
575 restrict = kwt.restrict
576 kwt.restrict = True
576 kwt.restrict = True
577 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
577 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
578 False, True)
578 False, True)
579 kwt.restrict = restrict
579 kwt.restrict = restrict
580 return n
580 return n
581
581
582 def rollback(self, dryrun=False):
582 def rollback(self, dryrun=False):
583 wlock = self.wlock()
583 wlock = self.wlock()
584 try:
584 try:
585 if not dryrun:
585 if not dryrun:
586 changed = self['.'].files()
586 changed = self['.'].files()
587 ret = super(kwrepo, self).rollback(dryrun)
587 ret = super(kwrepo, self).rollback(dryrun)
588 if not dryrun:
588 if not dryrun:
589 ctx = self['.']
589 ctx = self['.']
590 modified, added = _preselect(self[None].status(), changed)
590 modified, added = _preselect(self[None].status(), changed)
591 kwt.overwrite(ctx, modified, True, True)
591 kwt.overwrite(ctx, modified, True, True)
592 kwt.overwrite(ctx, added, True, False)
592 kwt.overwrite(ctx, added, True, False)
593 return ret
593 return ret
594 finally:
594 finally:
595 wlock.release()
595 wlock.release()
596
596
597 # monkeypatches
597 # monkeypatches
598 def kwpatchfile_init(orig, self, ui, fname, opener,
598 def kwpatchfile_init(orig, self, ui, fname, backend,
599 missing=False, eolmode=None):
599 missing=False, eolmode=None):
600 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
600 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
601 rejects or conflicts due to expanded keywords in working dir.'''
601 rejects or conflicts due to expanded keywords in working dir.'''
602 orig(self, ui, fname, opener, missing, eolmode)
602 orig(self, ui, fname, backend, missing, eolmode)
603 # shrink keywords read from working dir
603 # shrink keywords read from working dir
604 self.lines = kwt.shrinklines(self.fname, self.lines)
604 self.lines = kwt.shrinklines(self.fname, self.lines)
605
605
606 def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
606 def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
607 opts=None, prefix=''):
607 opts=None, prefix=''):
608 '''Monkeypatch patch.diff to avoid expansion.'''
608 '''Monkeypatch patch.diff to avoid expansion.'''
609 kwt.restrict = True
609 kwt.restrict = True
610 return orig(repo, node1, node2, match, changes, opts, prefix)
610 return orig(repo, node1, node2, match, changes, opts, prefix)
611
611
612 def kwweb_skip(orig, web, req, tmpl):
612 def kwweb_skip(orig, web, req, tmpl):
613 '''Wraps webcommands.x turning off keyword expansion.'''
613 '''Wraps webcommands.x turning off keyword expansion.'''
614 kwt.match = util.never
614 kwt.match = util.never
615 return orig(web, req, tmpl)
615 return orig(web, req, tmpl)
616
616
617 def kw_copy(orig, ui, repo, pats, opts, rename=False):
617 def kw_copy(orig, ui, repo, pats, opts, rename=False):
618 '''Wraps cmdutil.copy so that copy/rename destinations do not
618 '''Wraps cmdutil.copy so that copy/rename destinations do not
619 contain expanded keywords.
619 contain expanded keywords.
620 Note that the source of a regular file destination may also be a
620 Note that the source of a regular file destination may also be a
621 symlink:
621 symlink:
622 hg cp sym x -> x is symlink
622 hg cp sym x -> x is symlink
623 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
623 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
624 For the latter we have to follow the symlink to find out whether its
624 For the latter we have to follow the symlink to find out whether its
625 target is configured for expansion and we therefore must unexpand the
625 target is configured for expansion and we therefore must unexpand the
626 keywords in the destination.'''
626 keywords in the destination.'''
627 orig(ui, repo, pats, opts, rename)
627 orig(ui, repo, pats, opts, rename)
628 if opts.get('dry_run'):
628 if opts.get('dry_run'):
629 return
629 return
630 wctx = repo[None]
630 wctx = repo[None]
631 cwd = repo.getcwd()
631 cwd = repo.getcwd()
632
632
633 def haskwsource(dest):
633 def haskwsource(dest):
634 '''Returns true if dest is a regular file and configured for
634 '''Returns true if dest is a regular file and configured for
635 expansion or a symlink which points to a file configured for
635 expansion or a symlink which points to a file configured for
636 expansion. '''
636 expansion. '''
637 source = repo.dirstate.copied(dest)
637 source = repo.dirstate.copied(dest)
638 if 'l' in wctx.flags(source):
638 if 'l' in wctx.flags(source):
639 source = scmutil.canonpath(repo.root, cwd,
639 source = scmutil.canonpath(repo.root, cwd,
640 os.path.realpath(source))
640 os.path.realpath(source))
641 return kwt.match(source)
641 return kwt.match(source)
642
642
643 candidates = [f for f in repo.dirstate.copies() if
643 candidates = [f for f in repo.dirstate.copies() if
644 not 'l' in wctx.flags(f) and haskwsource(f)]
644 not 'l' in wctx.flags(f) and haskwsource(f)]
645 kwt.overwrite(wctx, candidates, False, False)
645 kwt.overwrite(wctx, candidates, False, False)
646
646
647 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
647 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
648 '''Wraps record.dorecord expanding keywords after recording.'''
648 '''Wraps record.dorecord expanding keywords after recording.'''
649 wlock = repo.wlock()
649 wlock = repo.wlock()
650 try:
650 try:
651 # record returns 0 even when nothing has changed
651 # record returns 0 even when nothing has changed
652 # therefore compare nodes before and after
652 # therefore compare nodes before and after
653 kwt.record = True
653 kwt.record = True
654 ctx = repo['.']
654 ctx = repo['.']
655 wstatus = repo[None].status()
655 wstatus = repo[None].status()
656 ret = orig(ui, repo, commitfunc, *pats, **opts)
656 ret = orig(ui, repo, commitfunc, *pats, **opts)
657 recctx = repo['.']
657 recctx = repo['.']
658 if ctx != recctx:
658 if ctx != recctx:
659 modified, added = _preselect(wstatus, recctx.files())
659 modified, added = _preselect(wstatus, recctx.files())
660 kwt.restrict = False
660 kwt.restrict = False
661 kwt.overwrite(recctx, modified, False, True)
661 kwt.overwrite(recctx, modified, False, True)
662 kwt.overwrite(recctx, added, False, True, True)
662 kwt.overwrite(recctx, added, False, True, True)
663 kwt.restrict = True
663 kwt.restrict = True
664 return ret
664 return ret
665 finally:
665 finally:
666 wlock.release()
666 wlock.release()
667
667
668 def kwfilectx_cmp(orig, self, fctx):
668 def kwfilectx_cmp(orig, self, fctx):
669 # keyword affects data size, comparing wdir and filelog size does
669 # keyword affects data size, comparing wdir and filelog size does
670 # not make sense
670 # not make sense
671 if (fctx._filerev is None and
671 if (fctx._filerev is None and
672 (self._repo._encodefilterpats or
672 (self._repo._encodefilterpats or
673 kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
673 kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
674 self.size() == fctx.size()):
674 self.size() == fctx.size()):
675 return self._filelog.cmp(self._filenode, fctx.data())
675 return self._filelog.cmp(self._filenode, fctx.data())
676 return True
676 return True
677
677
678 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
678 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
679 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
679 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
680 extensions.wrapfunction(patch, 'diff', kw_diff)
680 extensions.wrapfunction(patch, 'diff', kw_diff)
681 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
681 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
682 for c in 'annotate changeset rev filediff diff'.split():
682 for c in 'annotate changeset rev filediff diff'.split():
683 extensions.wrapfunction(webcommands, c, kwweb_skip)
683 extensions.wrapfunction(webcommands, c, kwweb_skip)
684 for name in recordextensions.split():
684 for name in recordextensions.split():
685 try:
685 try:
686 record = extensions.find(name)
686 record = extensions.find(name)
687 extensions.wrapfunction(record, 'dorecord', kw_dorecord)
687 extensions.wrapfunction(record, 'dorecord', kw_dorecord)
688 except KeyError:
688 except KeyError:
689 pass
689 pass
690
690
691 repo.__class__ = kwrepo
691 repo.__class__ = kwrepo
@@ -1,1697 +1,1743 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib
10 import tempfile, zlib
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15
15
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17
17
18 class PatchError(Exception):
18 class PatchError(Exception):
19 pass
19 pass
20
20
21 # helper functions
21 # helper functions
22
22
23 def copyfile(src, dst, basedir):
23 def copyfile(src, dst, basedir):
24 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
24 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
25 for x in [src, dst]]
25 for x in [src, dst]]
26 if os.path.lexists(absdst):
26 if os.path.lexists(absdst):
27 raise util.Abort(_("cannot create %s: destination already exists") %
27 raise util.Abort(_("cannot create %s: destination already exists") %
28 dst)
28 dst)
29
29
30 dstdir = os.path.dirname(absdst)
30 dstdir = os.path.dirname(absdst)
31 if dstdir and not os.path.isdir(dstdir):
31 if dstdir and not os.path.isdir(dstdir):
32 try:
32 try:
33 os.makedirs(dstdir)
33 os.makedirs(dstdir)
34 except IOError:
34 except IOError:
35 raise util.Abort(
35 raise util.Abort(
36 _("cannot create %s: unable to create destination directory")
36 _("cannot create %s: unable to create destination directory")
37 % dst)
37 % dst)
38
38
39 util.copyfile(abssrc, absdst)
39 util.copyfile(abssrc, absdst)
40
40
41 # public functions
41 # public functions
42
42
43 def split(stream):
43 def split(stream):
44 '''return an iterator of individual patches from a stream'''
44 '''return an iterator of individual patches from a stream'''
45 def isheader(line, inheader):
45 def isheader(line, inheader):
46 if inheader and line[0] in (' ', '\t'):
46 if inheader and line[0] in (' ', '\t'):
47 # continuation
47 # continuation
48 return True
48 return True
49 if line[0] in (' ', '-', '+'):
49 if line[0] in (' ', '-', '+'):
50 # diff line - don't check for header pattern in there
50 # diff line - don't check for header pattern in there
51 return False
51 return False
52 l = line.split(': ', 1)
52 l = line.split(': ', 1)
53 return len(l) == 2 and ' ' not in l[0]
53 return len(l) == 2 and ' ' not in l[0]
54
54
55 def chunk(lines):
55 def chunk(lines):
56 return cStringIO.StringIO(''.join(lines))
56 return cStringIO.StringIO(''.join(lines))
57
57
58 def hgsplit(stream, cur):
58 def hgsplit(stream, cur):
59 inheader = True
59 inheader = True
60
60
61 for line in stream:
61 for line in stream:
62 if not line.strip():
62 if not line.strip():
63 inheader = False
63 inheader = False
64 if not inheader and line.startswith('# HG changeset patch'):
64 if not inheader and line.startswith('# HG changeset patch'):
65 yield chunk(cur)
65 yield chunk(cur)
66 cur = []
66 cur = []
67 inheader = True
67 inheader = True
68
68
69 cur.append(line)
69 cur.append(line)
70
70
71 if cur:
71 if cur:
72 yield chunk(cur)
72 yield chunk(cur)
73
73
74 def mboxsplit(stream, cur):
74 def mboxsplit(stream, cur):
75 for line in stream:
75 for line in stream:
76 if line.startswith('From '):
76 if line.startswith('From '):
77 for c in split(chunk(cur[1:])):
77 for c in split(chunk(cur[1:])):
78 yield c
78 yield c
79 cur = []
79 cur = []
80
80
81 cur.append(line)
81 cur.append(line)
82
82
83 if cur:
83 if cur:
84 for c in split(chunk(cur[1:])):
84 for c in split(chunk(cur[1:])):
85 yield c
85 yield c
86
86
87 def mimesplit(stream, cur):
87 def mimesplit(stream, cur):
88 def msgfp(m):
88 def msgfp(m):
89 fp = cStringIO.StringIO()
89 fp = cStringIO.StringIO()
90 g = email.Generator.Generator(fp, mangle_from_=False)
90 g = email.Generator.Generator(fp, mangle_from_=False)
91 g.flatten(m)
91 g.flatten(m)
92 fp.seek(0)
92 fp.seek(0)
93 return fp
93 return fp
94
94
95 for line in stream:
95 for line in stream:
96 cur.append(line)
96 cur.append(line)
97 c = chunk(cur)
97 c = chunk(cur)
98
98
99 m = email.Parser.Parser().parse(c)
99 m = email.Parser.Parser().parse(c)
100 if not m.is_multipart():
100 if not m.is_multipart():
101 yield msgfp(m)
101 yield msgfp(m)
102 else:
102 else:
103 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
103 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
104 for part in m.walk():
104 for part in m.walk():
105 ct = part.get_content_type()
105 ct = part.get_content_type()
106 if ct not in ok_types:
106 if ct not in ok_types:
107 continue
107 continue
108 yield msgfp(part)
108 yield msgfp(part)
109
109
110 def headersplit(stream, cur):
110 def headersplit(stream, cur):
111 inheader = False
111 inheader = False
112
112
113 for line in stream:
113 for line in stream:
114 if not inheader and isheader(line, inheader):
114 if not inheader and isheader(line, inheader):
115 yield chunk(cur)
115 yield chunk(cur)
116 cur = []
116 cur = []
117 inheader = True
117 inheader = True
118 if inheader and not isheader(line, inheader):
118 if inheader and not isheader(line, inheader):
119 inheader = False
119 inheader = False
120
120
121 cur.append(line)
121 cur.append(line)
122
122
123 if cur:
123 if cur:
124 yield chunk(cur)
124 yield chunk(cur)
125
125
126 def remainder(cur):
126 def remainder(cur):
127 yield chunk(cur)
127 yield chunk(cur)
128
128
129 class fiter(object):
129 class fiter(object):
130 def __init__(self, fp):
130 def __init__(self, fp):
131 self.fp = fp
131 self.fp = fp
132
132
133 def __iter__(self):
133 def __iter__(self):
134 return self
134 return self
135
135
136 def next(self):
136 def next(self):
137 l = self.fp.readline()
137 l = self.fp.readline()
138 if not l:
138 if not l:
139 raise StopIteration
139 raise StopIteration
140 return l
140 return l
141
141
142 inheader = False
142 inheader = False
143 cur = []
143 cur = []
144
144
145 mimeheaders = ['content-type']
145 mimeheaders = ['content-type']
146
146
147 if not hasattr(stream, 'next'):
147 if not hasattr(stream, 'next'):
148 # http responses, for example, have readline but not next
148 # http responses, for example, have readline but not next
149 stream = fiter(stream)
149 stream = fiter(stream)
150
150
151 for line in stream:
151 for line in stream:
152 cur.append(line)
152 cur.append(line)
153 if line.startswith('# HG changeset patch'):
153 if line.startswith('# HG changeset patch'):
154 return hgsplit(stream, cur)
154 return hgsplit(stream, cur)
155 elif line.startswith('From '):
155 elif line.startswith('From '):
156 return mboxsplit(stream, cur)
156 return mboxsplit(stream, cur)
157 elif isheader(line, inheader):
157 elif isheader(line, inheader):
158 inheader = True
158 inheader = True
159 if line.split(':', 1)[0].lower() in mimeheaders:
159 if line.split(':', 1)[0].lower() in mimeheaders:
160 # let email parser handle this
160 # let email parser handle this
161 return mimesplit(stream, cur)
161 return mimesplit(stream, cur)
162 elif line.startswith('--- ') and inheader:
162 elif line.startswith('--- ') and inheader:
163 # No evil headers seen by diff start, split by hand
163 # No evil headers seen by diff start, split by hand
164 return headersplit(stream, cur)
164 return headersplit(stream, cur)
165 # Not enough info, keep reading
165 # Not enough info, keep reading
166
166
167 # if we are here, we have a very plain patch
167 # if we are here, we have a very plain patch
168 return remainder(cur)
168 return remainder(cur)
169
169
170 def extract(ui, fileobj):
170 def extract(ui, fileobj):
171 '''extract patch from data read from fileobj.
171 '''extract patch from data read from fileobj.
172
172
173 patch can be a normal patch or contained in an email message.
173 patch can be a normal patch or contained in an email message.
174
174
175 return tuple (filename, message, user, date, branch, node, p1, p2).
175 return tuple (filename, message, user, date, branch, node, p1, p2).
176 Any item in the returned tuple can be None. If filename is None,
176 Any item in the returned tuple can be None. If filename is None,
177 fileobj did not contain a patch. Caller must unlink filename when done.'''
177 fileobj did not contain a patch. Caller must unlink filename when done.'''
178
178
179 # attempt to detect the start of a patch
179 # attempt to detect the start of a patch
180 # (this heuristic is borrowed from quilt)
180 # (this heuristic is borrowed from quilt)
181 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
181 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
182 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
182 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
183 r'---[ \t].*?^\+\+\+[ \t]|'
183 r'---[ \t].*?^\+\+\+[ \t]|'
184 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
184 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
185
185
186 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
186 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
187 tmpfp = os.fdopen(fd, 'w')
187 tmpfp = os.fdopen(fd, 'w')
188 try:
188 try:
189 msg = email.Parser.Parser().parse(fileobj)
189 msg = email.Parser.Parser().parse(fileobj)
190
190
191 subject = msg['Subject']
191 subject = msg['Subject']
192 user = msg['From']
192 user = msg['From']
193 if not subject and not user:
193 if not subject and not user:
194 # Not an email, restore parsed headers if any
194 # Not an email, restore parsed headers if any
195 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
195 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
196
196
197 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
197 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
198 # should try to parse msg['Date']
198 # should try to parse msg['Date']
199 date = None
199 date = None
200 nodeid = None
200 nodeid = None
201 branch = None
201 branch = None
202 parents = []
202 parents = []
203
203
204 if subject:
204 if subject:
205 if subject.startswith('[PATCH'):
205 if subject.startswith('[PATCH'):
206 pend = subject.find(']')
206 pend = subject.find(']')
207 if pend >= 0:
207 if pend >= 0:
208 subject = subject[pend + 1:].lstrip()
208 subject = subject[pend + 1:].lstrip()
209 subject = subject.replace('\n\t', ' ')
209 subject = subject.replace('\n\t', ' ')
210 ui.debug('Subject: %s\n' % subject)
210 ui.debug('Subject: %s\n' % subject)
211 if user:
211 if user:
212 ui.debug('From: %s\n' % user)
212 ui.debug('From: %s\n' % user)
213 diffs_seen = 0
213 diffs_seen = 0
214 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
214 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
215 message = ''
215 message = ''
216 for part in msg.walk():
216 for part in msg.walk():
217 content_type = part.get_content_type()
217 content_type = part.get_content_type()
218 ui.debug('Content-Type: %s\n' % content_type)
218 ui.debug('Content-Type: %s\n' % content_type)
219 if content_type not in ok_types:
219 if content_type not in ok_types:
220 continue
220 continue
221 payload = part.get_payload(decode=True)
221 payload = part.get_payload(decode=True)
222 m = diffre.search(payload)
222 m = diffre.search(payload)
223 if m:
223 if m:
224 hgpatch = False
224 hgpatch = False
225 hgpatchheader = False
225 hgpatchheader = False
226 ignoretext = False
226 ignoretext = False
227
227
228 ui.debug('found patch at byte %d\n' % m.start(0))
228 ui.debug('found patch at byte %d\n' % m.start(0))
229 diffs_seen += 1
229 diffs_seen += 1
230 cfp = cStringIO.StringIO()
230 cfp = cStringIO.StringIO()
231 for line in payload[:m.start(0)].splitlines():
231 for line in payload[:m.start(0)].splitlines():
232 if line.startswith('# HG changeset patch') and not hgpatch:
232 if line.startswith('# HG changeset patch') and not hgpatch:
233 ui.debug('patch generated by hg export\n')
233 ui.debug('patch generated by hg export\n')
234 hgpatch = True
234 hgpatch = True
235 hgpatchheader = True
235 hgpatchheader = True
236 # drop earlier commit message content
236 # drop earlier commit message content
237 cfp.seek(0)
237 cfp.seek(0)
238 cfp.truncate()
238 cfp.truncate()
239 subject = None
239 subject = None
240 elif hgpatchheader:
240 elif hgpatchheader:
241 if line.startswith('# User '):
241 if line.startswith('# User '):
242 user = line[7:]
242 user = line[7:]
243 ui.debug('From: %s\n' % user)
243 ui.debug('From: %s\n' % user)
244 elif line.startswith("# Date "):
244 elif line.startswith("# Date "):
245 date = line[7:]
245 date = line[7:]
246 elif line.startswith("# Branch "):
246 elif line.startswith("# Branch "):
247 branch = line[9:]
247 branch = line[9:]
248 elif line.startswith("# Node ID "):
248 elif line.startswith("# Node ID "):
249 nodeid = line[10:]
249 nodeid = line[10:]
250 elif line.startswith("# Parent "):
250 elif line.startswith("# Parent "):
251 parents.append(line[10:])
251 parents.append(line[10:])
252 elif not line.startswith("# "):
252 elif not line.startswith("# "):
253 hgpatchheader = False
253 hgpatchheader = False
254 elif line == '---' and gitsendmail:
254 elif line == '---' and gitsendmail:
255 ignoretext = True
255 ignoretext = True
256 if not hgpatchheader and not ignoretext:
256 if not hgpatchheader and not ignoretext:
257 cfp.write(line)
257 cfp.write(line)
258 cfp.write('\n')
258 cfp.write('\n')
259 message = cfp.getvalue()
259 message = cfp.getvalue()
260 if tmpfp:
260 if tmpfp:
261 tmpfp.write(payload)
261 tmpfp.write(payload)
262 if not payload.endswith('\n'):
262 if not payload.endswith('\n'):
263 tmpfp.write('\n')
263 tmpfp.write('\n')
264 elif not diffs_seen and message and content_type == 'text/plain':
264 elif not diffs_seen and message and content_type == 'text/plain':
265 message += '\n' + payload
265 message += '\n' + payload
266 except:
266 except:
267 tmpfp.close()
267 tmpfp.close()
268 os.unlink(tmpname)
268 os.unlink(tmpname)
269 raise
269 raise
270
270
271 if subject and not message.startswith(subject):
271 if subject and not message.startswith(subject):
272 message = '%s\n%s' % (subject, message)
272 message = '%s\n%s' % (subject, message)
273 tmpfp.close()
273 tmpfp.close()
274 if not diffs_seen:
274 if not diffs_seen:
275 os.unlink(tmpname)
275 os.unlink(tmpname)
276 return None, message, user, date, branch, None, None, None
276 return None, message, user, date, branch, None, None, None
277 p1 = parents and parents.pop(0) or None
277 p1 = parents and parents.pop(0) or None
278 p2 = parents and parents.pop(0) or None
278 p2 = parents and parents.pop(0) or None
279 return tmpname, message, user, date, branch, nodeid, p1, p2
279 return tmpname, message, user, date, branch, nodeid, p1, p2
280
280
class patchmeta(object):
    """Metadata describing one file touched by a git-style patch.

    'op' is one of ADD, DELETE, RENAME, MODIFY or COPY.  'path' is the
    patched file path.  'oldpath' names the origin file when 'op' is
    COPY or RENAME and is None otherwise.  When the file mode changes,
    'mode' is a tuple (islink, isexec) whose members are truthy when
    the file is a symlink resp. executable; otherwise 'mode' is None.
    """

    def __init__(self, path):
        # Defaults: a plain in-place modification, no copy/rename
        # source, no mode change, text (non-binary) content.
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Keep only the two bits we care about: the symlink file-type
        # bit and the owner-executable bit.  The raw masked integers
        # (not booleans) are stored, matching historical behavior.
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
305
305
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    lr is a line iterator (typically a linereader) over the patch text.
    Returns a list of patchmeta objects, one per 'diff --git' section.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git'):
            m = gitre.match(line)
            if m:
                # A new file section starts: flush the previous one.
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # '--- ' marks the start of hunk data: metadata for this
                # file is complete.
                gitpatches.append(gp)
                gp = None
                continue
            # Extended git headers refine the pending patchmeta.
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # mode is the last 6 characters, an octal string
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    # Flush a trailing section with no hunk data (e.g. pure renames).
    if gp:
        gitpatches.append(gp)

    return gitpatches
349
349
class linereader(object):
    """Line-oriented reader that supports pushing lines back onto the
    input stream, with optional CRLF-to-LF normalization."""

    def __init__(self, fp, textmode=False):
        self.fp = fp
        # pushed-back lines, handed out again before reading from fp
        self.buf = []
        # when True, convert CRLF endings to LF on the fly
        self.textmode = textmode
        # EOL style of the first line read from fp ('\r\n', '\n' or None)
        self.eol = None

    def push(self, line):
        # Queue a line so the next readline() returns it again.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # Pushed-back lines take priority and bypass EOL sniffing.
        if self.buf:
            return self.buf.pop(0)
        line = self.fp.readline()
        # Remember the stream's EOL convention from the first real line.
        if not self.eol:
            if line.endswith('\r\n'):
                self.eol = '\r\n'
            elif line.endswith('\n'):
                self.eol = '\n'
        if self.textmode and line.endswith('\r\n'):
            line = line[:-2] + '\n'
        return line

    def __iter__(self):
        line = self.readline()
        while line:
            yield line
            line = self.readline()
383
383
class abstractbackend(object):
    """Abstract interface to the file store a patch is applied against.

    Concrete backends (e.g. fsbackend) implement the actual reads,
    writes and deletions so patchfile itself stays storage-agnostic.
    """

    def __init__(self, ui):
        self.ui = ui

    def readlines(self, fname):
        """Return target file lines, or its content as a single line
        for symlinks.
        """
        raise NotImplementedError

    def writelines(self, fname, lines):
        """Write lines to target file."""
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for
        this file.
        """
        pass
class fsbackend(abstractbackend):
    """Backend applying patches to files opened through a vfs opener."""

    def __init__(self, ui, opener):
        super(fsbackend, self).__init__(ui)
        self.opener = opener

    def readlines(self, fname):
        # Symlinks are represented by their target as a single "line".
        if os.path.islink(fname):
            return [os.readlink(fname)]
        fp = self.opener(fname, 'r')
        try:
            return list(fp)
        finally:
            fp.close()

    def writelines(self, fname, lines):
        # Ensure supplied data ends in fname, being a regular file or
        # a symlink. _updatedir will -too magically- take care
        # of setting it to the proper type afterwards.
        st_mode = None
        islink = os.path.islink(fname)
        if islink:
            # Buffer symlink content in memory, materialized below via
            # opener.symlink().
            fp = cStringIO.StringIO()
        else:
            try:
                # Preserve the existing permission bits across the rewrite.
                st_mode = os.lstat(fname).st_mode & 0777
            except OSError, e:
                # A missing file is fine (we are creating it); anything
                # else is a real error.
                if e.errno != errno.ENOENT:
                    raise
            fp = self.opener(fname, 'w')
        try:
            fp.writelines(lines)
            if islink:
                self.opener.symlink(fp.getvalue(), fname)
            if st_mode is not None:
                os.chmod(fname, st_mode)
        finally:
            fp.close()

    def unlink(self, fname):
        os.unlink(fname)

    def writerej(self, fname, failed, total, lines):
        # Rejects go next to the target file with a .rej suffix.
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
# range lines of context diffs: '--- start,len ----' / '*** start,len ***'
contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
# supported end-of-line normalization modes for patched files
eolmodes = ['strict', 'crlf', 'lf', 'auto']
class patchfile(object):
    """In-memory representation of one file being patched.

    Hunks are applied against self.lines via apply(); close() flushes
    the result and any rejects through the supplied backend.
    """

    def __init__(self, ui, fname, backend, missing=False, eolmode='strict'):
        self.fname = fname
        self.eolmode = eolmode
        # EOL style detected from the first line of the target file
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = missing
        if not missing:
            try:
                self.lines = self.backend.readlines(fname)
                if self.lines:
                    # Normalize line endings
                    if self.lines[0].endswith('\r\n'):
                        self.eol = '\r\n'
                    elif self.lines[0].endswith('\n'):
                        self.eol = '\n'
                    if eolmode != 'strict':
                        nlines = []
                        for l in self.lines:
                            if l.endswith('\r\n'):
                                l = l[:-2] + '\n'
                            nlines.append(l)
                        self.lines = nlines
                self.exists = True
            except IOError:
                # Missing target file: hunks may still create it.
                pass
        else:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        # line content -> candidate line numbers, built lazily in apply()
        self.hash = {}
        self.dirty = 0
        # cumulative line offset introduced by previously applied hunks
        self.offset = 0
        # extra offset observed when a hunk applied away from its
        # expected position; used to seed the next hunk's search
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines):
        # Convert normalized LF lines back to the requested/detected EOL
        # before handing them to the backend.
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.writelines(fname, lines)

    def printfile(self, warn):
        # Emit "patching file ..." at most once, on warning or verbose.
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def makerejlines(self, fname):
        # Generate a minimal unified-diff-style reject: a file header
        # followed by each rejected hunk verbatim.
        base = os.path.basename(fname)
        yield "--- %s\n+++ %s\n" % (base, base)
        for x in self.rej:
            for l in x.hunk:
                yield l
            if l[-1] != '\n':
                yield "\n\ No newline at end of file\n"

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        self.backend.writerej(self.fname, len(self.rej), self.hunks,
                              self.makerejlines(self.fname))

    def apply(self, h):
        """Apply hunk h to self.lines.

        Returns 0 on a clean application, the fuzz amount when fuzz was
        needed, or -1 when the hunk was rejected.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and h.createfile():
            self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # Binary hunks replace or remove the whole file content.
            if h.rmfile():
                self.backend.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old = h.old()
        # patch starts counting at 1 unless we are adding the file
        if h.starta == 0:
            start = 0
        else:
            start = h.starta + self.offset - 1
        orig_start = start
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
            if h.rmfile():
                self.backend.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start + self.skew

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        # Flush patched content (if any hunk changed it) and any
        # rejects; report the number of rejected hunks.
        if self.dirty:
            self.writelines(self.fname, self.lines)
        self.write_rej()
        return len(self.rej)
619
665
class hunk(object):
    """One text hunk of a patch, parsed from unified or context format.

    self.a holds the old-side lines (with '-'/' ' markers), self.b the
    new-side lines (without markers); self.hunk keeps the raw hunk text
    starting with the description line.
    """

    def __init__(self, desc, num, lr, context, create=False, remove=False):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)
        self.create = create
        self.remove = remove and not create

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None, False, False)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        nh.create = self.create
        nh.remove = self.remove
        return nh

    def read_unified_hunk(self, lr):
        # Parse the '@@ -start,len +start,len @@' description line.
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        # Omitted lengths default to 1 per unified diff convention.
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        # Old-side range line: '*** start,end ****'
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # Read the old-side lines, translating context markers into
        # unified-style '-'/' ' prefixes.
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the trailing newline
            # from the last old-side line.
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        # New-side range line: '--- start,end ----'
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # Read the new-side lines, merging '+' lines into self.hunk at
        # the right positions relative to the old-side lines.
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # Advance through self.hunk to find where this new-side
            # line belongs, inserting it if it is an addition.
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Consume a trailing '\ No newline at end of file' marker if
        # present; otherwise push the line back for the caller.
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def createfile(self):
        # True when this hunk creates the target file.
        return self.starta == 0 and self.lena == 0 and self.create

    def rmfile(self):
        # True when this hunk removes the target file.
        return self.startb == 0 and self.lenb == 0 and self.remove

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(l)-1)
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # top and bot now count context in the hunk
            # adjust them if either one is short
            context = max(top, bot, 3)
            if bot < context:
                bot = max(0, fuzz - (context - bot))
            else:
                bot = min(fuzz, bot)
            if top < context:
                top = max(0, fuzz - (context - top))
            else:
                top = min(fuzz, top)

            return l[top:len(l)-bot]
        return l

    def old(self, fuzz=0, toponly=False):
        return self.fuzzit(self.a, fuzz, toponly)

    def new(self, fuzz=0, toponly=False):
        return self.fuzzit(self.b, fuzz, toponly)
850
896
class binhunk:
    'A binary patch file. Only understands literals so far.'
    def __init__(self, gitpatch):
        # gitpatch: git patch metadata record this binary hunk belongs to
        self.gitpatch = gitpatch
        # decoded target content, filled in by extract(); None until then
        self.text = None
        self.hunk = ['GIT binary patch\n']

    def createfile(self):
        # binary hunks create their target for add/rename/copy operations
        return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')

    def rmfile(self):
        return self.gitpatch.op == 'DELETE'

    def complete(self):
        return self.text is not None

    def new(self):
        return [self.text]

    def extract(self, lr):
        """Read a git 'literal' binary hunk from line reader 'lr'.

        Decodes the base85 payload, inflates it and verifies the decoded
        length against the announced size.  Raises PatchError on
        malformed or truncated input.
        """
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            # first character encodes the decoded length of this line
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # the operands must be a tuple; without the parentheses this
            # parsed as (_(...) % len(text), size), so 'size' was never
            # interpolated and became a stray exception argument
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text
896
942
def parsefilename(str):
    """Extract the file name from a '--- ' or '+++ ' diff header line.

    Everything after the first tab (or, failing that, the first space)
    is dropped - that is where timestamps and other junk live.
    """
    name = str[4:].rstrip('\r\n')
    cut = name.find('\t')
    if cut < 0:
        cut = name.find(' ')
        if cut < 0:
            return name
    return name[:cut]
906
952
def pathstrip(path, strip):
    """Strip 'strip' leading components from 'path'.

    Returns a (stripped-prefix, remainder) pair.  Raises PatchError when
    the path has fewer components than requested.
    """
    if strip == 0:
        return '', path.rstrip()
    pathlen = len(path)
    i = 0
    remaining = strip
    while remaining > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        i += 1
        # runs of '/' count as a single component boundary
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        remaining -= 1
    return path[:i].lstrip(), path[i:].rstrip()
924
970
def selectfile(afile_orig, bfile_orig, hunk, strip):
    """Pick the file name a hunk should be applied to.

    Returns (fname, missing): 'fname' is the chosen target path after
    stripping, 'missing' is True when neither side exists on disk and
    the hunk does not create the file.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and os.path.lexists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and os.path.lexists(bfile)
    createfunc = hunk.createfile
    missing = not goodb and not gooda and not createfunc()

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if missing and abasedir == bbasedir and afile.startswith(bfile):
        # this isn't very pretty
        hunk.create = True
        if createfunc():
            missing = False
        else:
            hunk.create = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    return fname, missing
970
1016
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename commands.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    Such a sequence cannot be applied as-is: by the time 'a' is needed
    it has already been renamed, and 'b' has already been modified.  So
    the whole patch is scanned up front for copy and rename commands,
    letting the caller perform the copies ahead of time.
    """
    pos = 0
    try:
        # seekable input: remember where we are and rewind afterwards
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable stream: buffer everything in memory instead
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp, lr.textmode)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches
996
1042
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
      "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
      maps filenames to gitpatch records. Unique event.
    """
    changed = {}
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    git = False

    # our states
    BFILE = 1
    # None until we know the diff flavor; True for context, False for unified
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if (state == BFILE and ((not context and x[0] == '@') or
            ((context is not False) and x.startswith('***************')))):
            if context is None and x.startswith('***************'):
                context = True
            gpatch = changed.get(bfile)
            create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
            remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
            h = hunk(x, hunknum + 1, lr, context, create, remove)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h)
            yield 'hunk', h
        elif state == BFILE and x.startswith('GIT binary patch'):
            h = binhunk(changed[bfile])
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', ('a/' + afile, 'b/' + bfile, h)
            h.extract(lr)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            # check for git diff, scanning the whole patch file if needed
            m = gitre.match(x)
            if m:
                afile, bfile = m.group(1, 2)
                if not git:
                    git = True
                    gitpatches = scangitpatch(lr, x)
                    yield 'git', gitpatches
                    for gp in gitpatches:
                        changed[gp.path] = gp
            # else error?
            # copy/rename + modify should modify target, not source
            gp = changed.get(bfile)
            if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
                       or gp.mode):
                afile = bfile
                newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0
1092
1138
def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    The dict 'changed' is filled in with all of the filenames changed
    by the patch. Returns 0 for a clean patch, -1 if any rejects were
    found and 1 if there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.

    Callers probably want to call '_updatedir' after this to
    apply certain categories of changes not done by this function.
    """
    return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip,
                      eolmode=eolmode)
1109
1155
1110 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1156 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1111 rejects = 0
1157 rejects = 0
1112 err = 0
1158 err = 0
1113 current_file = None
1159 current_file = None
1114 cwd = os.getcwd()
1160 cwd = os.getcwd()
1115 opener = scmutil.opener(cwd)
1161 backend = fsbackend(ui, scmutil.opener(cwd))
1116
1162
1117 for state, values in iterhunks(fp):
1163 for state, values in iterhunks(fp):
1118 if state == 'hunk':
1164 if state == 'hunk':
1119 if not current_file:
1165 if not current_file:
1120 continue
1166 continue
1121 ret = current_file.apply(values)
1167 ret = current_file.apply(values)
1122 if ret >= 0:
1168 if ret >= 0:
1123 changed.setdefault(current_file.fname, None)
1169 changed.setdefault(current_file.fname, None)
1124 if ret > 0:
1170 if ret > 0:
1125 err = 1
1171 err = 1
1126 elif state == 'file':
1172 elif state == 'file':
1127 if current_file:
1173 if current_file:
1128 rejects += current_file.close()
1174 rejects += current_file.close()
1129 afile, bfile, first_hunk = values
1175 afile, bfile, first_hunk = values
1130 try:
1176 try:
1131 current_file, missing = selectfile(afile, bfile,
1177 current_file, missing = selectfile(afile, bfile,
1132 first_hunk, strip)
1178 first_hunk, strip)
1133 current_file = patcher(ui, current_file, opener,
1179 current_file = patcher(ui, current_file, backend,
1134 missing=missing, eolmode=eolmode)
1180 missing=missing, eolmode=eolmode)
1135 except PatchError, inst:
1181 except PatchError, inst:
1136 ui.warn(str(inst) + '\n')
1182 ui.warn(str(inst) + '\n')
1137 current_file = None
1183 current_file = None
1138 rejects += 1
1184 rejects += 1
1139 continue
1185 continue
1140 elif state == 'git':
1186 elif state == 'git':
1141 for gp in values:
1187 for gp in values:
1142 gp.path = pathstrip(gp.path, strip - 1)[1]
1188 gp.path = pathstrip(gp.path, strip - 1)[1]
1143 if gp.oldpath:
1189 if gp.oldpath:
1144 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1190 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1145 # Binary patches really overwrite target files, copying them
1191 # Binary patches really overwrite target files, copying them
1146 # will just make it fails with "target file exists"
1192 # will just make it fails with "target file exists"
1147 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1193 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1148 copyfn(gp.oldpath, gp.path, cwd)
1194 copyfn(gp.oldpath, gp.path, cwd)
1149 changed[gp.path] = gp
1195 changed[gp.path] = gp
1150 else:
1196 else:
1151 raise util.Abort(_('unsupported parser state: %s') % state)
1197 raise util.Abort(_('unsupported parser state: %s') % state)
1152
1198
1153 if current_file:
1199 if current_file:
1154 rejects += current_file.close()
1200 rejects += current_file.close()
1155
1201
1156 if rejects:
1202 if rejects:
1157 return -1
1203 return -1
1158 return err
1204 return err
1159
1205
def _updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    if not patches:
        return []
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    # collect copy/rename/delete operations from the git metadata
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        scmutil.dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    if (not similarity) and removes:
        wctx.remove(sorted(removes), True)

    # propagate exec/symlink flags recorded in the patch
    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.lexists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
            util.setflags(dst, islink, isexec)
    scmutil.addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)
1202
1248
def _externalpatch(patcher, patchname, ui, strip, cwd, files):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))

    # Initialized up front so that patcher output mentioning fuzz or
    # failures before any 'patching file' line cannot raise
    # UnboundLocalError below.
    pf = ''
    printed_file = False
    for line in fp:
        line = line.rstrip()
        ui.note(line + '\n')
        if line.startswith('patching file '):
            pf = util.parsepatchoutput(line)
            printed_file = False
            files.setdefault(pf, None)
        elif line.find('with fuzz') >= 0:
            fuzz = True
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
        elif line.find('saving rejects to file') >= 0:
            ui.warn(line + '\n')
        elif line.find('FAILED') >= 0:
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1239
1285
def internalpatch(ui, repo, patchobj, strip, cwd, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""

    if files is None:
        files = {}
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    # patchobj may be a path or an already-open file-like object
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    if cwd:
        curdir = os.getcwd()
        os.chdir(cwd)
    try:
        ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
    finally:
        if cwd:
            os.chdir(curdir)
        if fp != patchobj:
            fp.close()
    touched = _updatedir(ui, repo, files, similarity)
    files.update(dict.fromkeys(touched))
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1272
1318
1273 def patch(ui, repo, patchname, strip=1, cwd=None, files=None, eolmode='strict',
1319 def patch(ui, repo, patchname, strip=1, cwd=None, files=None, eolmode='strict',
1274 similarity=0):
1320 similarity=0):
1275 """Apply <patchname> to the working directory.
1321 """Apply <patchname> to the working directory.
1276
1322
1277 'eolmode' specifies how end of lines should be handled. It can be:
1323 'eolmode' specifies how end of lines should be handled. It can be:
1278 - 'strict': inputs are read in binary mode, EOLs are preserved
1324 - 'strict': inputs are read in binary mode, EOLs are preserved
1279 - 'crlf': EOLs are ignored when patching and reset to CRLF
1325 - 'crlf': EOLs are ignored when patching and reset to CRLF
1280 - 'lf': EOLs are ignored when patching and reset to LF
1326 - 'lf': EOLs are ignored when patching and reset to LF
1281 - None: get it from user settings, default to 'strict'
1327 - None: get it from user settings, default to 'strict'
1282 'eolmode' is ignored when using an external patcher program.
1328 'eolmode' is ignored when using an external patcher program.
1283
1329
1284 Returns whether patch was applied with fuzz factor.
1330 Returns whether patch was applied with fuzz factor.
1285 """
1331 """
1286 patcher = ui.config('ui', 'patch')
1332 patcher = ui.config('ui', 'patch')
1287 if files is None:
1333 if files is None:
1288 files = {}
1334 files = {}
1289 try:
1335 try:
1290 if patcher:
1336 if patcher:
1291 try:
1337 try:
1292 return _externalpatch(patcher, patchname, ui, strip, cwd,
1338 return _externalpatch(patcher, patchname, ui, strip, cwd,
1293 files)
1339 files)
1294 finally:
1340 finally:
1295 touched = _updatedir(ui, repo, files, similarity)
1341 touched = _updatedir(ui, repo, files, similarity)
1296 files.update(dict.fromkeys(touched))
1342 files.update(dict.fromkeys(touched))
1297 return internalpatch(ui, repo, patchname, strip, cwd, files, eolmode,
1343 return internalpatch(ui, repo, patchname, strip, cwd, files, eolmode,
1298 similarity)
1344 similarity)
1299 except PatchError, err:
1345 except PatchError, err:
1300 raise util.Abort(str(err))
1346 raise util.Abort(str(err))
1301
1347
def changedfiles(patchpath, strip=1):
    """Return the set of file names touched by the patch at 'patchpath'."""
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'hunk':
                continue
            elif state == 'file':
                afile, bfile, first_hunk = values
                current_file, missing = selectfile(afile, bfile,
                                                   first_hunk, strip)
                changed.add(current_file)
            elif state == 'git':
                for gp in values:
                    gp.path = pathstrip(gp.path, strip - 1)[1]
                    changed.add(gp.path)
                    if gp.oldpath:
                        gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                        if gp.op == 'RENAME':
                            # a rename touches its source too
                            changed.add(gp.oldpath)
            else:
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
1327
1373
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # git blob id: sha1 over "blob <len>\0<content>"
        if not text:
            return hex(nullid)
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # prefix each base85 line with a char encoding its decoded length
        l = len(line)
        if l <= 26:
            l = chr(ord('A') + l - 1)
        else:
            l = chr(l - 26 + ord('a') - 1)
        return '%c%s\n' % (l, base85.b85encode(line, True))

    def chunk(text, csize=52):
        # split text into csize-byte pieces
        l = len(text)
        i = 0
        while i < l:
            yield text[i:i + csize]
            i += csize

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        return ""

    # TODO: deltas
    ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
           (tohash, tnhash, len(tn))]
    for l in chunk(zlib.compress(tn)):
        ret.append(fmtline(l))
    ret.append('\n')
    return ''.join(ret)
1365
1411
class GitDiffRequired(Exception):
    '''Raised when a change cannot be represented in plain patch format
    and the diff must be regenerated with git extensions enabled.'''
    pass
1368
1414
def diffopts(ui, opts=None, untrusted=False):
    '''build an mdiff.diffopts object from command options and [diff] config

    Explicit (truthy) command line options take precedence over the
    configuration file.
    '''
    def get(key, name=None, getter=ui.configbool):
        # command line wins when set; otherwise consult the [diff] section
        if opts:
            val = opts.get(key)
            if val:
                return val
        return getter('diff', name or key, None, untrusted=untrusted)

    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        context=get('unified', getter=ui.config))
1382
1428
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        # default: compare the working directory against its first parent
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs keyed by filename so that fetching
        # the same file from both contexts reuses one filelog instance
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used filelog
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used position
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        # nothing changed: no diff to produce
        return []

    revs = None
    if not repo.ui.quiet:
        # short hashes normally, full hashes in debug mode
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        # copies/renames are only representable in git patch format
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
                modified, added, removed, copy, getfilectx, opts, losedata,
                prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # give the caller a chance to accept the data loss; if it
                # declines (or no callback was given) retry in git format
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1457
1503
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # map line prefixes to ui labels; order matters, the first match wins
    # (e.g. '+++' must be tested before the bare '+')
    prefixes = [('diff', 'diff.diffline'),
                ('copy', 'diff.extended'),
                ('rename', 'diff.extended'),
                ('old', 'diff.extended'),
                ('new', 'diff.extended'),
                ('deleted', 'diff.extended'),
                ('---', 'diff.file_a'),
                ('+++', 'diff.file_b'),
                ('@@', 'diff.hunk'),
                ('-', 'diff.deleted'),
                ('+', 'diff.inserted')]

    for chunk in func(*args, **kw):
        pieces = chunk.split('\n')
        for idx, piece in enumerate(pieces):
            if idx:
                # reinsert the newline consumed by split()
                yield ('\n', '')
            core = piece
            if piece and piece[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                core = piece.rstrip()
            matched = False
            for prefix, label in prefixes:
                if core.startswith(prefix):
                    yield (core, label)
                    matched = True
                    break
            if not matched:
                yield (piece, '')
            if piece != core:
                yield (piece[len(core):], 'diff.trailingwhitespace')
1489
1535
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # thin wrapper: run diff() output through difflabel() to attach labels
    return difflabel(diff, *args, **kw)
1493
1539
1494
1540
1495 def _addmodehdr(header, omode, nmode):
1541 def _addmodehdr(header, omode, nmode):
1496 if omode != nmode:
1542 if omode != nmode:
1497 header.append('old mode %s\n' % omode)
1543 header.append('old mode %s\n' % omode)
1498 header.append('new mode %s\n' % nmode)
1544 header.append('new mode %s\n' % nmode)
1499
1545
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    '''Generate diff text between ctx1 and ctx2 for the given file lists,
    yielding git/extended header strings and hunk text per file.

    losedatafn is called with a filename whenever a change cannot be
    represented in plain (non-git) patch format: mode changes, binary
    content, copies/renames, empty-file creation or deletion.
    prefix is prepended to all displayed paths (used for subrepos).
    '''

    def join(f):
        # apply the subrepo display prefix
        return os.path.join(prefix, f)

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    # source files already consumed by a rename, so later matches are copies
    gone = set()
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse mapping of the copy dict: destination -> source
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        # git headers carry no -r revision annotations
        revs = None

    for f in sorted(modified + added + removed):
        to = None   # old content (None when the file is newly added)
        tn = None   # new content (None when the file was removed)
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    # the new file originates from a copy or rename
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        if a in removed and a not in gone:
                            # source disappeared: report as rename, once
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        # diff against the copy source, not an empty file
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        # plain patches cannot record exec/symlink flags
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                # modified file: check flag and binary representability
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)
        if opts.git:
            # the "diff --git a/... b/..." line goes before any mode headers
            header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))

        if dodiff:
            if dodiff == 'binary':
                text = b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
                                     join(a), join(b), revs, opts=opts)
            # emit headers alone only if they carry more than the diffline
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
1603
1649
def diffstatdata(lines):
    '''parse patch lines and yield (filename, adds, removes, isbinary)
    for each file touched by the patch.

    A file with no '+'/'-' body lines at all is flagged isbinary=True,
    matching the heuristic diffstat() relies on.
    '''
    # raw string: '\s' is a regex escape, not a valid Python string escape
    # (a non-raw '\s' raises SyntaxWarning on modern interpreters)
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    filename, adds, removes = None, 0, 0
    for line in lines:
        if line.startswith('diff'):
            # flush the counters of the previous file, if any
            if filename:
                isbinary = adds == 0 and removes == 0
                yield (filename, adds, removes, isbinary)
            # set numbers to 0 anyway when starting new file
            adds, removes = 0, 0
            if line.startswith('diff --git'):
                filename = gitre.search(line).group(1)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++'):
            adds += 1
        elif line.startswith('-') and not line.startswith('---'):
            removes += 1
    # flush the trailing file
    if filename:
        isbinary = adds == 0 and removes == 0
        yield (filename, adds, removes, isbinary)
1627
1673
def diffstat(lines, width=80, git=False):
    '''render per-file change counts from a patch as a histogram table'''
    output = []
    stats = list(diffstatdata(lines))

    maxtotal = maxname = 0
    totaladds = totalremoves = 0
    hasbinary = False

    # precompute display widths once; filenames may contain wide characters
    sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
             for filename, adds, removes, isbinary in stats]

    for filename, adds, removes, isbinary, namewidth in sized:
        totaladds += adds
        totalremoves += removes
        maxname = max(maxname, namewidth)
        maxtotal = max(maxtotal, adds + removes)
        hasbinary = hasbinary or isbinary

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # leave room for the 'Bin' marker
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary, namewidth in sized:
        if git and isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - namewidth),
                       countwidth, count,
                       '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
1679
1725
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # histogram row: separate the graph and label its +/- runs
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now