##// END OF EJS Templates
narrow: use merge.ACTION_GET instead of duplicating 'g' constant...
Martin von Zweigbergk -
r41213:5838afea default
parent child Browse files
Show More
@@ -1,296 +1,296 b''
1 1 # narrowspec.py - methods for working with a narrow view of a repository
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 error,
15 15 match as matchmod,
16 16 merge,
17 17 repository,
18 18 sparse,
19 19 util,
20 20 )
21 21
# The file in .hg/store/ that indicates which paths exist in the store
FILENAME = 'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
DIRSTATE_FILENAME = 'narrowspec.dirstate'

# Pattern prefixes that are allowed in narrow patterns. This list MUST
# only contain patterns that are fast and safe to evaluate. Keep in mind
# that patterns are supplied by clients and executed on remote servers
# as part of wire protocol commands. That means that changes to this
# data structure influence the wire protocol and should not be taken
# lightly - especially removals.
VALID_PREFIXES = (
    b'path:',
    b'rootfilesin:',
)
37 37
def normalizesplitpattern(kind, pat):
    """Normalize a (kind, pattern) pair.

    Strips trailing slashes from the pattern and validates it, then
    returns the normalized (kind, pattern) tuple.
    """
    stripped = pat.rstrip('/')
    _validatepattern(stripped)
    return kind, stripped
46 46
47 47 def _numlines(s):
48 48 """Returns the number of lines in s, including ending empty lines."""
49 49 # We use splitlines because it is Unicode-friendly and thus Python 3
50 50 # compatible. However, it does not count empty lines at the end, so trick
51 51 # it by adding a character at the end.
52 52 return len((s + 'x').splitlines())
53 53
def _validatepattern(pat):
    """Abort if pat is not usable as a narrowspec pattern.

    Patterns are stored in the narrowspec as newline-separated
    POSIX-style bytestring paths with no escaping, so embedded newlines
    and relative path components ("." / "..") are rejected.
    """
    # Newlines separate entries in the narrowspec file, so a pattern
    # containing one would corrupt the file.
    if _numlines(pat) > 1:
        raise error.Abort(_('newlines are not allowed in narrowspec paths'))

    if {'.', '..'} & set(pat.split('/')):
        raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
69 69
def normalizepattern(pattern, defaultkind='path'):
    """Return the normalized form of a text-format pattern.

    A pattern with no kind prefix gets ``defaultkind`` prepended.
    """
    kind, pat = matchmod._patsplit(pattern, defaultkind)
    kind, pat = normalizesplitpattern(kind, pat)
    return '%s:%s' % (kind, pat)
77 77
def parsepatterns(pats):
    """Parse an iterable of user-supplied patterns into a typed pattern set.

    Patterns without a prefix are assumed to be ``path:`` patterns.
    For safety and performance reasons only a few prefixes are allowed;
    see ``validatepatterns()``.

    Use this on patterns coming from the user to normalize and validate
    them into the internal representation used for patterns.
    """
    normalized = set()
    for orig in pats:
        normalized.add(normalizepattern(orig))
    validatepatterns(normalized)
    return normalized
92 92
def validatepatterns(pats):
    """Check that pats is a set of properly prefixed narrow patterns.

    The expected structure is a set of normalized patterns, each
    beginning with ``path:`` or ``rootfilesin:``.

    Use this to validate internal data structures and patterns loaded
    from sources that use the internal, prefixed pattern representation
    (but can't necessarily be fully trusted).
    """
    if not isinstance(pats, set):
        raise error.ProgrammingError('narrow patterns should be a set; '
                                     'got %r' % pats)

    for pat in pats:
        if pat.startswith(VALID_PREFIXES):
            continue
        # Use a Mercurial exception because this can happen due to user
        # bugs (e.g. manually updating spec file).
        raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat,
                          hint=_('narrow patterns must begin with one of '
                                 'the following: %s') %
                          ', '.join(VALID_PREFIXES))
115 115
def format(includes, excludes):
    """Serialize include/exclude pattern sets into narrowspec file text.

    Includes that are also excluded are dropped; both sections are
    emitted in sorted order.
    """
    lines = ['[include]']
    lines.extend(sorted(includes - excludes))
    lines.append('[exclude]')
    lines.extend(sorted(excludes))
    return '\n'.join(lines) + '\n'
124 124
def match(root, include=None, exclude=None):
    """Build a matcher for the given narrowspec include/exclude patterns."""
    if not include:
        # Passing empty include and empty exclude to matchmod.match()
        # gives a matcher that matches everything, so explicitly use
        # the nevermatcher.
        return matchmod.never(root, '')
    return matchmod.match(root, '', [], include=include,
                          exclude=exclude or [])
133 133
def parseconfig(ui, spec):
    """Parse a narrowspec blob into validated (includes, excludes) sets.

    Aborts if the spec uses '%include' profiles, which narrowspec does
    not support.
    """
    # maybe we should care about the profiles returned too
    includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
    if profiles:
        raise error.Abort(_("including other spec files using '%include' is not"
                            " supported in narrowspec"))

    for pats in (includepats, excludepats):
        validatepatterns(pats)

    return includepats, excludepats
145 145
def load(repo):
    """Read and parse the narrowspec from the store.

    A missing narrowspec file is treated the same as an empty one:
    empty include and exclude sets are returned.
    """
    try:
        spec = repo.svfs.read(FILENAME)
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        # Treat "narrowspec does not exist" the same as "narrowspec file
        # exists and is empty".
        return set(), set()

    return parseconfig(repo.ui, spec)
157 157
def save(repo, includepats, excludepats):
    """Validate the pattern sets and write them to the store narrowspec."""
    for pats in (includepats, excludepats):
        validatepatterns(pats)
    repo.svfs.write(FILENAME, format(includepats, excludepats))
163 163
def copytoworkingcopy(repo, tr):
    """Mirror the store narrowspec into the working copy's .hg directory.

    When a transaction is given, the copy is registered as a file
    generator so it is written together with the transaction; otherwise
    the file is written immediately.
    """
    if not tr:
        repo.vfs.write(DIRSTATE_FILENAME, repo.svfs.read(FILENAME))
        return

    def write(file):
        spec = repo.svfs.read(FILENAME)
        file.write(spec)
        file.close()
    tr.addfilegenerator('narrowspec', (DIRSTATE_FILENAME,), write,
                        location='plain')
175 175
def savebackup(repo, backupname):
    """Hard-link the store narrowspec to a backup file, replacing any old one.

    No-op for repositories without the narrow requirement.
    """
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    storevfs = repo.svfs
    storevfs.tryunlink(backupname)
    util.copyfile(storevfs.join(FILENAME), storevfs.join(backupname),
                  hardlink=True)
182 182
def restorebackup(repo, backupname):
    """Move a backed-up narrowspec back into place.

    No-op for repositories without the narrow requirement.
    """
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    storevfs = repo.svfs
    util.rename(storevfs.join(backupname), storevfs.join(FILENAME))
187 187
def clearbackup(repo, backupname):
    """Delete a no-longer-needed narrowspec backup file.

    No-op for repositories without the narrow requirement.
    """
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    repo.svfs.unlink(backupname)
192 192
def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
    r"""Intersect requested narrow patterns with the repo's own settings.

    The result is the logical AND of the requested and repo patterns.

    :param req_includes: requested includes
    :param req_excludes: requested excludes
    :param repo_includes: repo includes
    :param repo_excludes: repo excludes
    :return: include patterns, exclude patterns, and invalid include patterns.

    >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
    (set(['f1']), {}, [])
    >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
    (set(['f1']), {}, [])
    >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
    (set(['f1/fc1']), {}, [])
    >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
    ([], set(['path:.']), [])
    >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
    (set(['f2/fc2']), {}, [])
    >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
    ([], set(['path:.']), [])
    >>> restrictpatterns({'f1/$non_exitent_var'}, {}, ['f1','f2'], [])
    (set(['f1/$non_exitent_var']), {}, [])
    """
    # Excludes always combine: anything either side excludes stays out.
    res_excludes = set(req_excludes)
    res_excludes.update(repo_excludes)
    invalid_includes = []
    if not req_includes:
        # Nothing specific requested: take everything the repo tracks.
        res_includes = set(repo_includes)
    elif 'path:.' not in repo_includes:
        # The repo itself is narrow, so each requested include must be
        # equal to, or nested inside, one of the repo's includes.
        res_includes = []
        for req_include in req_includes:
            req_include = util.expandpath(util.normpath(req_include))
            if req_include in repo_includes:
                res_includes.append(req_include)
                continue
            for repo_include in repo_includes:
                if req_include.startswith(repo_include + '/'):
                    res_includes.append(req_include)
                    break
            else:
                invalid_includes.append(req_include)
        if res_includes:
            res_includes = set(res_includes)
        else:
            # No requested include survived: exclude everything.
            res_excludes = {'path:.'}
    else:
        res_includes = set(req_includes)
    return res_includes, res_excludes, invalid_includes
245 245
# These two are extracted for extensions (specifically for Google's CitC file
# system)
def _deletecleanfiles(repo, files):
    """Remove the given files from the working copy."""
    for path in files:
        repo.wvfs.unlinkpath(path)
251 251
def _writeaddedfiles(repo, pctx, files):
    """Materialize files newly covered by the narrowspec in the working copy.

    Files already present on disk are left untouched; the rest are
    fetched via a merge 'get' action from the dirstate parent.
    """
    actions = merge.emptyactions()
    # Diff residue duplicated this assignment with a raw 'g' key; the
    # symbolic merge.ACTION_GET constant is the intended form.
    addgaction = actions[merge.ACTION_GET].append
    mf = repo['.'].manifest()
    for f in files:
        if not repo.wvfs.exists(f):
            addgaction((f, (mf.flags(f), False), "narrowspec updated"))
    merge.applyupdates(repo, actions, wctx=repo[None],
                       mctx=repo['.'], overwrite=False)
261 261
def checkworkingcopynarrowspec(repo):
    """Abort if the working copy's narrowspec copy disagrees with the store."""
    storespec = repo.svfs.tryread(FILENAME)
    wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    if wcspec == storespec:
        return
    raise error.Abort(_("working copy's narrowspec is stale"),
                      hint=_("run 'hg tracked --update-working-copy'"))
268 268
def updateworkingcopy(repo):
    """Bring the working copy in sync with a changed narrowspec.

    Files that fell out of the narrowspec are deleted if clean (dirty
    ones are kept, with a status message) and dropped from the dirstate;
    files newly covered by the spec are added to the dirstate and
    written out from the dirstate parent.
    """
    oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    newspec = repo.svfs.tryread(FILENAME)

    oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
    newincludes, newexcludes = parseconfig(repo.ui, newspec)
    oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
    newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
    # Paths covered only by the new spec, and only by the old spec.
    addedmatch = matchmod.differencematcher(newmatch, oldmatch)
    removedmatch = matchmod.differencematcher(oldmatch, newmatch)

    dirstate = repo.dirstate
    lookup, status = dirstate.status(removedmatch, subrepos=[], ignored=False,
                                     clean=True, unknown=False)
    _deletecleanfiles(repo, status.clean)
    trackeddirty = lookup + status.modified + status.added
    for path in sorted(trackeddirty):
        repo.ui.status(_('not deleting possibly dirty file %s\n') % path)
    for path in status.clean + trackeddirty:
        dirstate.drop(path)

    repo.narrowpats = newincludes, newexcludes
    repo._narrowmatch = newmatch
    pctx = repo['.']
    newfiles = [f for f in pctx.manifest().walk(addedmatch)
                if f not in dirstate]
    for path in newfiles:
        dirstate.normallookup(path)
    _writeaddedfiles(repo, pctx, newfiles)
General Comments 0
You need to be logged in to leave comments. Login now