narrow: make warning about possibly dirty files respect ui.relative-paths...
Martin von Zweigbergk
r42326:770f5f58 default
@@ -1,316 +1,318 @@
 # narrowspec.py - methods for working with a narrow view of a repository
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 
 from .i18n import _
 from . import (
     error,
     match as matchmod,
     merge,
     repository,
+    scmutil,
     sparse,
     util,
 )
 
 # The file in .hg/store/ that indicates which paths exit in the store
 FILENAME = 'narrowspec'
 # The file in .hg/ that indicates which paths exit in the dirstate
 DIRSTATE_FILENAME = 'narrowspec.dirstate'
 
 # Pattern prefixes that are allowed in narrow patterns. This list MUST
 # only contain patterns that are fast and safe to evaluate. Keep in mind
 # that patterns are supplied by clients and executed on remote servers
 # as part of wire protocol commands. That means that changes to this
 # data structure influence the wire protocol and should not be taken
 # lightly - especially removals.
 VALID_PREFIXES = (
     b'path:',
     b'rootfilesin:',
 )
 
 def normalizesplitpattern(kind, pat):
     """Returns the normalized version of a pattern and kind.
 
     Returns a tuple with the normalized kind and normalized pattern.
     """
     pat = pat.rstrip('/')
     _validatepattern(pat)
     return kind, pat
 
 def _numlines(s):
     """Returns the number of lines in s, including ending empty lines."""
     # We use splitlines because it is Unicode-friendly and thus Python 3
     # compatible. However, it does not count empty lines at the end, so trick
     # it by adding a character at the end.
     return len((s + 'x').splitlines())
 
 def _validatepattern(pat):
     """Validates the pattern and aborts if it is invalid.
 
     Patterns are stored in the narrowspec as newline-separated
     POSIX-style bytestring paths. There's no escaping.
     """
 
     # We use newlines as separators in the narrowspec file, so don't allow them
     # in patterns.
     if _numlines(pat) > 1:
         raise error.Abort(_('newlines are not allowed in narrowspec paths'))
 
     components = pat.split('/')
     if '.' in components or '..' in components:
         raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
 
 def normalizepattern(pattern, defaultkind='path'):
     """Returns the normalized version of a text-format pattern.
 
     If the pattern has no kind, the default will be added.
     """
     kind, pat = matchmod._patsplit(pattern, defaultkind)
     return '%s:%s' % normalizesplitpattern(kind, pat)
 
 def parsepatterns(pats):
     """Parses an iterable of patterns into a typed pattern set.
 
     Patterns are assumed to be ``path:`` if no prefix is present.
     For safety and performance reasons, only some prefixes are allowed.
     See ``validatepatterns()``.
 
     This function should be used on patterns that come from the user to
     normalize and validate them to the internal data structure used for
     representing patterns.
     """
     res = {normalizepattern(orig) for orig in pats}
     validatepatterns(res)
     return res
 
 def validatepatterns(pats):
     """Validate that patterns are in the expected data structure and format.
 
     And that is a set of normalized patterns beginning with ``path:`` or
     ``rootfilesin:``.
 
     This function should be used to validate internal data structures
     and patterns that are loaded from sources that use the internal,
     prefixed pattern representation (but can't necessarily be fully trusted).
     """
     if not isinstance(pats, set):
         raise error.ProgrammingError('narrow patterns should be a set; '
                                      'got %r' % pats)
 
     for pat in pats:
         if not pat.startswith(VALID_PREFIXES):
             # Use a Mercurial exception because this can happen due to user
             # bugs (e.g. manually updating spec file).
             raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat,
                               hint=_('narrow patterns must begin with one of '
                                      'the following: %s') %
                                    ', '.join(VALID_PREFIXES))
 
 def format(includes, excludes):
     output = '[include]\n'
     for i in sorted(includes - excludes):
         output += i + '\n'
     output += '[exclude]\n'
     for e in sorted(excludes):
         output += e + '\n'
     return output
 
 def match(root, include=None, exclude=None):
     if not include:
         # Passing empty include and empty exclude to matchmod.match()
         # gives a matcher that matches everything, so explicitly use
         # the nevermatcher.
         return matchmod.never()
     return matchmod.match(root, '', [], include=include or [],
                           exclude=exclude or [])
 
 def parseconfig(ui, spec):
     # maybe we should care about the profiles returned too
     includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
     if profiles:
         raise error.Abort(_("including other spec files using '%include' is not"
                             " supported in narrowspec"))
 
     validatepatterns(includepats)
     validatepatterns(excludepats)
 
     return includepats, excludepats
 
 def load(repo):
     try:
         spec = repo.svfs.read(FILENAME)
     except IOError as e:
         # Treat "narrowspec does not exist" the same as "narrowspec file exists
         # and is empty".
         if e.errno == errno.ENOENT:
             return set(), set()
         raise
 
     return parseconfig(repo.ui, spec)
 
 def save(repo, includepats, excludepats):
     validatepatterns(includepats)
     validatepatterns(excludepats)
     spec = format(includepats, excludepats)
     repo.svfs.write(FILENAME, spec)
 
 def copytoworkingcopy(repo):
     spec = repo.svfs.read(FILENAME)
     repo.vfs.write(DIRSTATE_FILENAME, spec)
 
 def savebackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     svfs = repo.svfs
     svfs.tryunlink(backupname)
     util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)
 
 def restorebackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))
 
 def savewcbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     vfs = repo.vfs
     vfs.tryunlink(backupname)
     # It may not exist in old repos
     if vfs.exists(DIRSTATE_FILENAME):
         util.copyfile(vfs.join(DIRSTATE_FILENAME), vfs.join(backupname),
                       hardlink=True)
 
 def restorewcbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     # It may not exist in old repos
     if repo.vfs.exists(backupname):
         util.rename(repo.vfs.join(backupname), repo.vfs.join(DIRSTATE_FILENAME))
 
 def clearwcbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     repo.vfs.tryunlink(backupname)
 
 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
     r""" Restricts the patterns according to repo settings,
     results in a logical AND operation
 
     :param req_includes: requested includes
     :param req_excludes: requested excludes
     :param repo_includes: repo includes
     :param repo_excludes: repo excludes
     :return: include patterns, exclude patterns, and invalid include patterns.
 
     >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
     (set(['f1']), {}, [])
     >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
     (set(['f1']), {}, [])
     >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
     (set(['f1/fc1']), {}, [])
     >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
     ([], set(['path:.']), [])
     >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
     (set(['f2/fc2']), {}, [])
     >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
     ([], set(['path:.']), [])
     >>> restrictpatterns({'f1/$non_exitent_var'}, {}, ['f1','f2'], [])
     (set(['f1/$non_exitent_var']), {}, [])
     """
     res_excludes = set(req_excludes)
     res_excludes.update(repo_excludes)
     invalid_includes = []
     if not req_includes:
         res_includes = set(repo_includes)
     elif 'path:.' not in repo_includes:
         res_includes = []
         for req_include in req_includes:
             req_include = util.expandpath(util.normpath(req_include))
             if req_include in repo_includes:
                 res_includes.append(req_include)
                 continue
             valid = False
             for repo_include in repo_includes:
                 if req_include.startswith(repo_include + '/'):
                     valid = True
                     res_includes.append(req_include)
                     break
             if not valid:
                 invalid_includes.append(req_include)
         if len(res_includes) == 0:
             res_excludes = {'path:.'}
         else:
             res_includes = set(res_includes)
     else:
         res_includes = set(req_includes)
     return res_includes, res_excludes, invalid_includes
 
 # These two are extracted for extensions (specifically for Google's CitC file
 # system)
 def _deletecleanfiles(repo, files):
     for f in files:
         repo.wvfs.unlinkpath(f)
 
 def _writeaddedfiles(repo, pctx, files):
     actions = merge.emptyactions()
     addgaction = actions[merge.ACTION_GET].append
     mf = repo['.'].manifest()
     for f in files:
         if not repo.wvfs.exists(f):
             addgaction((f, (mf.flags(f), False), "narrowspec updated"))
     merge.applyupdates(repo, actions, wctx=repo[None],
                        mctx=repo['.'], overwrite=False)
 
 def checkworkingcopynarrowspec(repo):
     storespec = repo.svfs.tryread(FILENAME)
     wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
     if wcspec != storespec:
         raise error.Abort(_("working copy's narrowspec is stale"),
                           hint=_("run 'hg tracked --update-working-copy'"))
 
 def updateworkingcopy(repo, assumeclean=False):
     """updates the working copy and dirstate from the store narrowspec
 
     When assumeclean=True, files that are not known to be clean will also
     be deleted. It is then up to the caller to make sure they are clean.
     """
     oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
     newspec = repo.svfs.tryread(FILENAME)
 
     oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
     newincludes, newexcludes = parseconfig(repo.ui, newspec)
     oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
     newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
     addedmatch = matchmod.differencematcher(newmatch, oldmatch)
     removedmatch = matchmod.differencematcher(oldmatch, newmatch)
 
     ds = repo.dirstate
     lookup, status = ds.status(removedmatch, subrepos=[], ignored=False,
                                clean=True, unknown=False)
     trackeddirty = status.modified + status.added
     clean = status.clean
     if assumeclean:
         assert not trackeddirty
         clean.extend(lookup)
     else:
         trackeddirty.extend(lookup)
     _deletecleanfiles(repo, clean)
+    uipathfn = scmutil.getuipathfn(repo)
     for f in sorted(trackeddirty):
-        repo.ui.status(_('not deleting possibly dirty file %s\n') % f)
+        repo.ui.status(_('not deleting possibly dirty file %s\n') % uipathfn(f))
     for f in clean + trackeddirty:
         ds.drop(f)
 
     repo.narrowpats = newincludes, newexcludes
     repo._narrowmatch = newmatch
     pctx = repo['.']
     newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
     for f in newfiles:
         ds.normallookup(f)
     _writeaddedfiles(repo, pctx, newfiles)
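
Note on the change above: the warning now routes file names through scmutil.getuipathfn(repo), which formats repository-relative paths for display according to the ui.relative-paths setting. The following is a minimal standalone sketch of that same pattern, not part of this changeset; it assumes a Mercurial installation of roughly this vintage (5.0+), that the current directory is inside a repository, and the file name 'dir/file.txt' is purely hypothetical.

    # Sketch only: shows how a path passed through scmutil.getuipathfn() is
    # printed relative to the cwd when "[ui] relative-paths = yes" is set in
    # the hgrc, and relative to the repo root otherwise.
    from mercurial import hg, scmutil, ui as uimod

    ui = uimod.ui.load()                  # loads config, including ui.relative-paths
    repo = hg.repository(ui, b'.')        # repository containing the cwd (assumed)
    uipathfn = scmutil.getuipathfn(repo)  # same helper the warning above now uses
    ui.status(b'not deleting possibly dirty file %s\n' % uipathfn(b'dir/file.txt'))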