##// END OF EJS Templates
narrow: make warning about possibly dirty files respect ui.relative-paths...
Martin von Zweigbergk -
r42326:770f5f58 default
parent child Browse files
Show More
@@ -1,316 +1,318 b''
1 1 # narrowspec.py - methods for working with a narrow view of a repository
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 error,
15 15 match as matchmod,
16 16 merge,
17 17 repository,
18 scmutil,
18 19 sparse,
19 20 util,
20 21 )
21 22
# The file in .hg/store/ that indicates which paths exist in the store
23 24 FILENAME = 'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
25 26 DIRSTATE_FILENAME = 'narrowspec.dirstate'
26 27
27 28 # Pattern prefixes that are allowed in narrow patterns. This list MUST
28 29 # only contain patterns that are fast and safe to evaluate. Keep in mind
29 30 # that patterns are supplied by clients and executed on remote servers
30 31 # as part of wire protocol commands. That means that changes to this
31 32 # data structure influence the wire protocol and should not be taken
32 33 # lightly - especially removals.
33 34 VALID_PREFIXES = (
34 35 b'path:',
35 36 b'rootfilesin:',
36 37 )
37 38
def normalizesplitpattern(kind, pat):
    """Returns the normalized version of a pattern and kind.

    Returns a tuple with the normalized kind and normalized pattern.
    """
    # Trailing slashes carry no meaning in narrow patterns; strip them so
    # equivalent specs normalize to the same text before validation.
    stripped = pat.rstrip('/')
    _validatepattern(stripped)
    return kind, stripped
46 47
47 48 def _numlines(s):
48 49 """Returns the number of lines in s, including ending empty lines."""
49 50 # We use splitlines because it is Unicode-friendly and thus Python 3
50 51 # compatible. However, it does not count empty lines at the end, so trick
51 52 # it by adding a character at the end.
52 53 return len((s + 'x').splitlines())
53 54
def _validatepattern(pat):
    """Validates the pattern and aborts if it is invalid.

    Patterns are stored in the narrowspec as newline-separated
    POSIX-style bytestring paths. There's no escaping.
    """
    # Newlines separate entries in the narrowspec file and there is no
    # escaping, so a pattern containing one would corrupt the file.
    if _numlines(pat) > 1:
        raise error.Abort(_('newlines are not allowed in narrowspec paths'))

    # Relative components could be used to escape the intended subtree.
    if {'.', '..'} & set(pat.split('/')):
        raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
69 70
def normalizepattern(pattern, defaultkind='path'):
    """Returns the normalized version of a text-format pattern.

    If the pattern has no kind, the default will be added.
    """
    kind, pat = matchmod._patsplit(pattern, defaultkind)
    normalized = normalizesplitpattern(kind, pat)
    return '%s:%s' % normalized
77 78
def parsepatterns(pats):
    """Parses an iterable of patterns into a typed pattern set.

    Patterns are assumed to be ``path:`` if no prefix is present.
    For safety and performance reasons, only some prefixes are allowed.
    See ``validatepatterns()``.

    This function should be used on patterns that come from the user to
    normalize and validate them to the internal data structure used for
    representing patterns.
    """
    normalized = set(map(normalizepattern, pats))
    validatepatterns(normalized)
    return normalized
92 93
def validatepatterns(pats):
    """Validate that patterns are in the expected data structure and format.

    And that is a set of normalized patterns beginning with ``path:`` or
    ``rootfilesin:``.

    This function should be used to validate internal data structures
    and patterns that are loaded from sources that use the internal,
    prefixed pattern representation (but can't necessarily be fully trusted).
    """
    if not isinstance(pats, set):
        raise error.ProgrammingError(
            'narrow patterns should be a set; got %r' % pats)

    for pat in pats:
        if pat.startswith(VALID_PREFIXES):
            continue
        # Use a Mercurial exception because this can happen due to user
        # bugs (e.g. manually updating spec file).
        raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat,
                          hint=_('narrow patterns must begin with one of '
                                 'the following: %s') %
                          ', '.join(VALID_PREFIXES))
115 116
def format(includes, excludes):
    """Serialize include/exclude pattern sets into narrowspec file text."""
    # Excludes always win, so an include that is also excluded need not
    # be written out.
    lines = ['[include]']
    lines.extend(sorted(includes - excludes))
    lines.append('[exclude]')
    lines.extend(sorted(excludes))
    return '\n'.join(lines) + '\n'
124 125
def match(root, include=None, exclude=None):
    """Build a matcher for the given narrow include/exclude patterns."""
    if include:
        return matchmod.match(root, '', [], include=include or [],
                              exclude=exclude or [])
    # Passing empty include and empty exclude to matchmod.match() gives a
    # matcher that matches everything, which is the opposite of what an
    # empty narrowspec means, so explicitly use the nevermatcher.
    return matchmod.never()
133 134
def parseconfig(ui, spec):
    """Parse narrowspec file text into (includepats, excludepats) sets."""
    # maybe we should care about the profiles returned too
    includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
    if profiles:
        raise error.Abort(_("including other spec files using '%include' is not"
                            " supported in narrowspec"))

    for pats in (includepats, excludepats):
        validatepatterns(pats)

    return includepats, excludepats
145 146
def load(repo):
    """Read and parse the narrowspec from the store.

    A missing narrowspec file is treated the same as an existing but
    empty one.
    """
    try:
        spec = repo.svfs.read(FILENAME)
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        return set(), set()

    return parseconfig(repo.ui, spec)
157 158
def save(repo, includepats, excludepats):
    """Validate the given pattern sets and write them to the store."""
    validatepatterns(includepats)
    validatepatterns(excludepats)
    repo.svfs.write(FILENAME, format(includepats, excludepats))
163 164
def copytoworkingcopy(repo):
    """Mirror the store's narrowspec into the working-copy copy verbatim."""
    repo.vfs.write(DIRSTATE_FILENAME, repo.svfs.read(FILENAME))
167 168
def savebackup(repo, backupname):
    """Back up the store narrowspec (no-op for non-narrow repos)."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    storevfs = repo.svfs
    storevfs.tryunlink(backupname)
    # Hard-linking is safe: the spec file is replaced, never edited in place.
    util.copyfile(storevfs.join(FILENAME), storevfs.join(backupname),
                  hardlink=True)
174 175
def restorebackup(repo, backupname):
    """Restore the store narrowspec from a savebackup() backup."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    storevfs = repo.svfs
    util.rename(storevfs.join(backupname), storevfs.join(FILENAME))
179 180
def savewcbackup(repo, backupname):
    """Back up the working copy's narrowspec (no-op for non-narrow repos)."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    wcvfs = repo.vfs
    wcvfs.tryunlink(backupname)
    # It may not exist in old repos
    if wcvfs.exists(DIRSTATE_FILENAME):
        util.copyfile(wcvfs.join(DIRSTATE_FILENAME), wcvfs.join(backupname),
                      hardlink=True)
189 190
def restorewcbackup(repo, backupname):
    """Restore the working copy's narrowspec from a savewcbackup() backup."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    # It may not exist in old repos
    if repo.vfs.exists(backupname):
        util.rename(repo.vfs.join(backupname),
                    repo.vfs.join(DIRSTATE_FILENAME))
196 197
def clearwcbackup(repo, backupname):
    """Remove the working-copy narrowspec backup, if present."""
    if repository.NARROW_REQUIREMENT in repo.requirements:
        repo.vfs.tryunlink(backupname)
201 202
def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
    r""" Restricts the patterns according to repo settings,
    results in a logical AND operation

    :param req_includes: requested includes
    :param req_excludes: requested excludes
    :param repo_includes: repo includes
    :param repo_excludes: repo excludes
    :return: include patterns, exclude patterns, and invalid include patterns.

    >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
    (set(['f1']), {}, [])
    >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
    (set(['f1']), {}, [])
    >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
    (set(['f1/fc1']), {}, [])
    >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
    ([], set(['path:.']), [])
    >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
    (set(['f2/fc2']), {}, [])
    >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
    ([], set(['path:.']), [])
    >>> restrictpatterns({'f1/$non_exitent_var'}, {}, ['f1','f2'], [])
    (set(['f1/$non_exitent_var']), {}, [])
    """
    # Excludes from either side always apply (union).
    res_excludes = set(req_excludes) | set(repo_excludes)
    invalid_includes = []
    if not req_includes:
        # Nothing specifically requested: the repo's own includes stand.
        return set(repo_includes), res_excludes, invalid_includes
    if 'path:.' in repo_includes:
        # The repo includes everything, so any requested include is valid.
        return set(req_includes), res_excludes, invalid_includes
    res_includes = []
    for req_include in req_includes:
        req_include = util.expandpath(util.normpath(req_include))
        if req_include in repo_includes:
            res_includes.append(req_include)
            continue
        # A request strictly inside some repo include is also valid.
        if any(req_include.startswith(repo_include + '/')
               for repo_include in repo_includes):
            res_includes.append(req_include)
            continue
        invalid_includes.append(req_include)
    if res_includes:
        return set(res_includes), res_excludes, invalid_includes
    # Nothing valid remained: exclude everything. Note that the empty
    # include value is a list here, preserving historical behavior.
    return [], {'path:.'}, invalid_includes
254 255
255 256 # These two are extracted for extensions (specifically for Google's CitC file
256 257 # system)
def _deletecleanfiles(repo, files):
    """Remove the given clean files from the working copy."""
    for path in files:
        repo.wvfs.unlinkpath(path)
260 261
def _writeaddedfiles(repo, pctx, files):
    """Write newly-visible files to the working copy.

    Schedules a merge 'get' action for every listed file that is missing
    from disk, then applies the actions as a non-overwriting update.
    """
    actions = merge.emptyactions()
    getactions = actions[merge.ACTION_GET]
    manifest = repo['.'].manifest()
    for path in files:
        if repo.wvfs.exists(path):
            continue
        getactions.append((path, (manifest.flags(path), False),
                           "narrowspec updated"))
    merge.applyupdates(repo, actions, wctx=repo[None], mctx=repo['.'],
                       overwrite=False)
270 271
def checkworkingcopynarrowspec(repo):
    """Abort if the working copy's narrowspec copy lags the store's."""
    wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    storespec = repo.svfs.tryread(FILENAME)
    if wcspec != storespec:
        raise error.Abort(_("working copy's narrowspec is stale"),
                          hint=_("run 'hg tracked --update-working-copy'"))
277 278
def updateworkingcopy(repo, assumeclean=False):
    """updates the working copy and dirstate from the store narrowspec

    When assumeclean=True, files that are not known to be clean will also
    be deleted. It is then up to the caller to make sure they are clean.
    """
    wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    storespec = repo.svfs.tryread(FILENAME)

    oldincludes, oldexcludes = parseconfig(repo.ui, wcspec)
    newincludes, newexcludes = parseconfig(repo.ui, storespec)
    oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
    newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
    # Files that became visible vs. files that fell out of the narrow spec.
    addedmatch = matchmod.differencematcher(newmatch, oldmatch)
    removedmatch = matchmod.differencematcher(oldmatch, newmatch)

    ds = repo.dirstate
    lookup, status = ds.status(removedmatch, subrepos=[], ignored=False,
                               clean=True, unknown=False)
    trackeddirty = status.modified + status.added
    clean = status.clean
    if assumeclean:
        # The caller promised that nothing is dirty, so lookup files can
        # be deleted along with the known-clean ones.
        assert not trackeddirty
        clean.extend(lookup)
    else:
        trackeddirty.extend(lookup)
    _deletecleanfiles(repo, clean)
    # Report kept files using the user's configured path style
    # (ui.relative-paths).
    uipathfn = scmutil.getuipathfn(repo)
    for f in sorted(trackeddirty):
        repo.ui.status(_('not deleting possibly dirty file %s\n')
                       % uipathfn(f))
    # Whether deleted or kept, these files are no longer tracked.
    for f in clean + trackeddirty:
        ds.drop(f)

    repo.narrowpats = newincludes, newexcludes
    repo._narrowmatch = newmatch
    pctx = repo['.']
    newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
    for f in newfiles:
        ds.normallookup(f)
    _writeaddedfiles(repo, pctx, newfiles)
General Comments 0
You need to be logged in to leave comments. Login now