@@ -1,1503 +1,1503 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''Overridden Mercurial commands and functions for the largefiles extension'''
 from __future__ import absolute_import

 import copy
 import os

 from mercurial.i18n import _

 from mercurial.hgweb import (
     webcommands,
 )

 from mercurial import (
     archival,
     cmdutil,
     copies as copiesmod,
     error,
     exchange,
     extensions,
     exthelper,
     filemerge,
     hg,
     logcmdutil,
     match as matchmod,
     merge,
     pathutil,
     pycompat,
     scmutil,
     smartset,
     subrepo,
     upgrade,
     url as urlmod,
     util,
 )

 from . import (
     lfcommands,
     lfutil,
     storefactory,
 )

 eh = exthelper.exthelper()

 # -- Utility functions: commonly/repeatedly needed functionality ---------------

 def composelargefilematcher(match, manifest):
     '''create a matcher that matches only the largefiles in the original
     matcher'''
     m = copy.copy(match)
     lfile = lambda f: lfutil.standin(f) in manifest
     m._files = [lf for lf in m._files if lfile(lf)]
     m._fileset = set(m._files)
     m.always = lambda: False
     origmatchfn = m.matchfn
     m.matchfn = lambda f: lfile(f) and origmatchfn(f)
     return m

 def composenormalfilematcher(match, manifest, exclude=None):
     excluded = set()
     if exclude is not None:
         excluded.update(exclude)

     m = copy.copy(match)
     notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                               manifest or f in excluded)
     m._files = [lf for lf in m._files if notlfile(lf)]
     m._fileset = set(m._files)
     m.always = lambda: False
     origmatchfn = m.matchfn
     m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
     return m

 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
     large = opts.get(r'large')
     lfsize = lfutil.getminsize(
         ui, lfutil.islfilesrepo(repo), opts.get(r'lfsize'))

     lfmatcher = None
     if lfutil.islfilesrepo(repo):
         lfpats = ui.configlist(lfutil.longname, 'patterns')
         if lfpats:
             lfmatcher = matchmod.match(repo.root, '', list(lfpats))

     lfnames = []
     m = matcher

     wctx = repo[None]
     for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
         exact = m.exact(f)
         lfile = lfutil.standin(f) in wctx
         nfile = f in wctx
         exists = lfile or nfile

         # Don't warn the user when they attempt to add a normal tracked file.
         # The normal add code will do that for us.
         if exact and exists:
             if lfile:
                 ui.warn(_('%s already a largefile\n') % uipathfn(f))
             continue

         if (exact or not exists) and not lfutil.isstandin(f):
             # In case the file was removed previously, but not committed
             # (issue3507)
             if not repo.wvfs.exists(f):
                 continue

             abovemin = (lfsize and
                         repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
             if large or abovemin or (lfmatcher and lfmatcher(f)):
                 lfnames.append(f)
                 if ui.verbose or not exact:
                     ui.status(_('adding %s as a largefile\n') % uipathfn(f))

     bad = []

     # Need to lock, otherwise there could be a race condition between
     # when standins are created and added to the repo.
     with repo.wlock():
         if not opts.get(r'dry_run'):
             standins = []
             lfdirstate = lfutil.openlfdirstate(ui, repo)
             for f in lfnames:
                 standinname = lfutil.standin(f)
                 lfutil.writestandin(repo, standinname, hash='',
                     executable=lfutil.getexecutable(repo.wjoin(f)))
                 standins.append(standinname)
                 if lfdirstate[f] == 'r':
                     lfdirstate.normallookup(f)
                 else:
                     lfdirstate.add(f)
             lfdirstate.write()
             bad += [lfutil.splitstandin(f)
                     for f in repo[None].add(standins)
                     if f in m.files()]

     added = [f for f in lfnames if f not in bad]
     return added, bad

 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
     after = opts.get(r'after')
     m = composelargefilematcher(matcher, repo[None].manifest())
     try:
         repo.lfstatus = True
         s = repo.status(match=m, clean=not isaddremove)
     finally:
         repo.lfstatus = False
     manifest = repo[None].manifest()
     modified, added, deleted, clean = [[f for f in list
                                         if lfutil.standin(f) in manifest]
                                        for list in (s.modified, s.added,
                                                     s.deleted, s.clean)]

     def warn(files, msg):
         for f in files:
             ui.warn(msg % uipathfn(f))
         return int(len(files) > 0)

     if after:
         remove = deleted
         result = warn(modified + added + clean,
                       _('not removing %s: file still exists\n'))
     else:
         remove = deleted + clean
         result = warn(modified, _('not removing %s: file is modified (use -f'
                                   ' to force removal)\n'))
         result = warn(added, _('not removing %s: file has been marked for add'
                                ' (use forget to undo)\n')) or result

     # Need to lock because standin files are deleted then removed from the
     # repository and we could race in-between.
     with repo.wlock():
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         for f in sorted(remove):
             if ui.verbose or not m.exact(f):
                 ui.status(_('removing %s\n') % uipathfn(f))

             if not dryrun:
                 if not after:
                     repo.wvfs.unlinkpath(f, ignoremissing=True)

         if dryrun:
             return result

         remove = [lfutil.standin(f) for f in remove]
         # If this is being called by addremove, let the original addremove
         # function handle this.
         if not isaddremove:
             for f in remove:
                 repo.wvfs.unlinkpath(f, ignoremissing=True)
             repo[None].forget(remove)

         for f in remove:
             lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                   False)

         lfdirstate.write()

     return result

 # For overriding mercurial.hgweb.webcommands so that largefiles will
 # appear at their right place in the manifests.
 @eh.wrapfunction(webcommands, 'decodepath')
 def decodepath(orig, path):
     return lfutil.splitstandin(path) or path

 # -- Wrappers: modify existing commands --------------------------------

 @eh.wrapcommand('add',
     opts=[('', 'large', None, _('add as largefile')),
           ('', 'normal', None, _('add as normal file')),
           ('', 'lfsize', '', _('add all files above this size (in megabytes) '
                                'as largefiles (default: 10)'))])
 def overrideadd(orig, ui, repo, *pats, **opts):
     if opts.get(r'normal') and opts.get(r'large'):
         raise error.Abort(_('--normal cannot be used with --large'))
     return orig(ui, repo, *pats, **opts)

 @eh.wrapfunction(cmdutil, 'add')
 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
     # The --normal flag short circuits this override
     if opts.get(r'normal'):
         return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

     ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
     normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                              ladded)
     bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)

     bad.extend(f for f in lbad)
     return bad

 @eh.wrapfunction(cmdutil, 'remove')
 def cmdutilremove(orig, ui, repo, matcher, prefix, uipathfn, after, force,
                   subrepos, dryrun):
     normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
     result = orig(ui, repo, normalmatcher, prefix, uipathfn, after, force,
                   subrepos, dryrun)
     return removelargefiles(ui, repo, False, matcher, uipathfn, dryrun,
                             after=after, force=force) or result

 @eh.wrapfunction(subrepo.hgsubrepo, 'status')
 def overridestatusfn(orig, repo, rev2, **opts):
     try:
         repo._repo.lfstatus = True
         return orig(repo, rev2, **opts)
     finally:
         repo._repo.lfstatus = False

 @eh.wrapcommand('status')
 def overridestatus(orig, ui, repo, *pats, **opts):
     try:
         repo.lfstatus = True
         return orig(ui, repo, *pats, **opts)
     finally:
         repo.lfstatus = False

 @eh.wrapfunction(subrepo.hgsubrepo, 'dirty')
 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
     try:
         repo._repo.lfstatus = True
         return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
     finally:
         repo._repo.lfstatus = False

 @eh.wrapcommand('log')
 def overridelog(orig, ui, repo, *pats, **opts):
     def overridematchandpats(orig, ctx, pats=(), opts=None, globbed=False,
                              default='relpath', badfn=None):
         """Matcher that merges root directory with .hglf, suitable for log.
         It is still possible to match .hglf directly.
         For any listed files run log on the standin too.
         matchfn tries both the given filename and with .hglf stripped.
         """
         if opts is None:
             opts = {}
         matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
         m, p = copy.copy(matchandpats)

         if m.always():
             # We want to match everything anyway, so there's no benefit trying
             # to add standins.
             return matchandpats

         pats = set(p)

         def fixpats(pat, tostandin=lfutil.standin):
             if pat.startswith('set:'):
                 return pat

             kindpat = matchmod._patsplit(pat, None)

             if kindpat[0] is not None:
                 return kindpat[0] + ':' + tostandin(kindpat[1])
             return tostandin(kindpat[1])

         cwd = repo.getcwd()
         if cwd:
             hglf = lfutil.shortname
             back = util.pconvert(repo.pathto(hglf)[:-len(hglf)])

             def tostandin(f):
                 # The file may already be a standin, so truncate the back
                 # prefix and test before mangling it. This avoids turning
                 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                     return f

                 # An absolute path is from outside the repo, so truncate the
                 # path to the root before building the standin. Otherwise cwd
                 # is somewhere in the repo, relative to root, and needs to be
                 # prepended before building the standin.
                 if os.path.isabs(cwd):
                     f = f[len(back):]
                 else:
                     f = cwd + '/' + f
                 return back + lfutil.standin(f)
         else:
             def tostandin(f):
                 if lfutil.isstandin(f):
                     return f
                 return lfutil.standin(f)
         pats.update(fixpats(f, tostandin) for f in p)

         for i in range(0, len(m._files)):
             # Don't add '.hglf' to m.files, since that is already covered by '.'
             if m._files[i] == '.':
                 continue
             standin = lfutil.standin(m._files[i])
             # If the "standin" is a directory, append instead of replace to
             # support naming a directory on the command line with only
             # largefiles. The original directory is kept to support normal
             # files.
             if standin in ctx:
                 m._files[i] = standin
             elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                 m._files.append(standin)

         m._fileset = set(m._files)
         m.always = lambda: False
         origmatchfn = m.matchfn
         def lfmatchfn(f):
             lf = lfutil.splitstandin(f)
             if lf is not None and origmatchfn(lf):
                 return True
             r = origmatchfn(f)
             return r
         m.matchfn = lfmatchfn

         ui.debug('updated patterns: %s\n' % ', '.join(sorted(pats)))
         return m, pats

     # For hg log --patch, the match object is used in two different senses:
     # (1) to determine what revisions should be printed out, and
     # (2) to determine what files to print out diffs for.
     # The magic matchandpats override should be used for case (1) but not for
     # case (2).
     oldmatchandpats = scmutil.matchandpats
     def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
         wctx = repo[None]
         match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
         return lambda ctx: match

     wrappedmatchandpats = extensions.wrappedfunction(scmutil, 'matchandpats',
                                                      overridematchandpats)
     wrappedmakefilematcher = extensions.wrappedfunction(
         logcmdutil, '_makenofollowfilematcher', overridemakefilematcher)
     with wrappedmatchandpats, wrappedmakefilematcher:
         return orig(ui, repo, *pats, **opts)

 @eh.wrapcommand('verify',
     opts=[('', 'large', None,
            _('verify that all largefiles in current revision exists')),
           ('', 'lfa', None,
            _('verify largefiles in all revisions, not just current')),
           ('', 'lfc', None,
            _('verify local largefile contents, not just existence'))])
 def overrideverify(orig, ui, repo, *pats, **opts):
     large = opts.pop(r'large', False)
     all = opts.pop(r'lfa', False)
     contents = opts.pop(r'lfc', False)

     result = orig(ui, repo, *pats, **opts)
     if large or all or contents:
         result = result or lfcommands.verifylfiles(ui, repo, all, contents)
     return result

 @eh.wrapcommand('debugstate',
     opts=[('', 'large', None, _('display largefiles dirstate'))])
 def overridedebugstate(orig, ui, repo, *pats, **opts):
     large = opts.pop(r'large', False)
     if large:
         class fakerepo(object):
             dirstate = lfutil.openlfdirstate(ui, repo)
         orig(ui, fakerepo, *pats, **opts)
     else:
         orig(ui, repo, *pats, **opts)

 # Before starting the manifest merge, merge.updates will call
 # _checkunknownfile to check if there are any files in the merged-in
 # changeset that collide with unknown files in the working copy.
 #
 # The largefiles are seen as unknown, so this prevents us from merging
 # in a file 'foo' if we already have a largefile with the same name.
 #
 # The overridden function filters the unknown files by removing any
 # largefiles. This makes the merge proceed and we can then handle this
 # case further in the overridden calculateupdates function below.
 @eh.wrapfunction(merge, '_checkunknownfile')
 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
     if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
         return False
     return origfn(repo, wctx, mctx, f, f2)

 # The manifest merge handles conflicts on the manifest level. We want
 # to handle changes in largefile-ness of files at this level too.
 #
 # The strategy is to run the original calculateupdates and then process
 # the action list it outputs. There are two cases we need to deal with:
 #
 # 1. Normal file in p1, largefile in p2. Here the largefile is
 #    detected via its standin file, which will enter the working copy
 #    with a "get" action. It is not "merge" since the standin is all
 #    Mercurial is concerned with at this level -- the link to the
 #    existing normal file is not relevant here.
 #
 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
 #    since the largefile will be present in the working copy and
 #    different from the normal file in p2. Mercurial therefore
 #    triggers a merge action.
 #
 # In both cases, we prompt the user and emit new actions to either
 # remove the standin (if the normal file was kept) or to remove the
 # normal file and get the standin (if the largefile was kept). The
 # default prompt answer is to use the largefile version since it was
 # presumably changed on purpose.
 #
 # Finally, the merge.applyupdates function will then take care of
 # writing the files into the working copy and lfcommands.updatelfiles
 # will update the largefiles.
 @eh.wrapfunction(merge, 'calculateupdates')
 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                              acceptremote, *args, **kwargs):
     overwrite = force and not branchmerge
     actions, diverge, renamedelete = origfn(
         repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)

     if overwrite:
         return actions, diverge, renamedelete

     # Convert to dictionary with filename as key and action as value.
     lfiles = set()
     for f in actions:
         splitstandin = lfutil.splitstandin(f)
         if splitstandin in p1:
             lfiles.add(splitstandin)
         elif lfutil.standin(f) in p1:
             lfiles.add(f)

     for lfile in sorted(lfiles):
         standin = lfutil.standin(lfile)
         (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
         (sm, sargs, smsg) = actions.get(standin, (None, None, None))
         if sm in ('g', 'dc') and lm != 'r':
             if sm == 'dc':
                 f1, f2, fa, move, anc = sargs
                 sargs = (p2[f2].flags(), False)
             # Case 1: normal file in the working copy, largefile in
             # the second parent
             usermsg = _('remote turned local normal file %s into a largefile\n'
                         'use (l)argefile or keep (n)ormal file?'
                         '$$ &Largefile $$ &Normal file') % lfile
             if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                 actions[lfile] = ('r', None, 'replaced by standin')
                 actions[standin] = ('g', sargs, 'replaces standin')
             else: # keep local normal file
                 actions[lfile] = ('k', None, 'replaces standin')
                 if branchmerge:
                     actions[standin] = ('k', None, 'replaced by non-standin')
                 else:
                     actions[standin] = ('r', None, 'replaced by non-standin')
         elif lm in ('g', 'dc') and sm != 'r':
             if lm == 'dc':
                 f1, f2, fa, move, anc = largs
                 largs = (p2[f2].flags(), False)
             # Case 2: largefile in the working copy, normal file in
             # the second parent
             usermsg = _('remote turned local largefile %s into a normal file\n'
                         'keep (l)argefile or use (n)ormal file?'
                         '$$ &Largefile $$ &Normal file') % lfile
             if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                 if branchmerge:
                     # largefile can be restored from standin safely
                     actions[lfile] = ('k', None, 'replaced by standin')
                     actions[standin] = ('k', None, 'replaces standin')
                 else:
                     # "lfile" should be marked as "removed" without
                     # removal of itself
                     actions[lfile] = ('lfmr', None,
                                       'forget non-standin largefile')

                     # linear-merge should treat this largefile as 're-added'
                     actions[standin] = ('a', None, 'keep standin')
             else: # pick remote normal file
                 actions[lfile] = ('g', largs, 'replaces standin')
                 actions[standin] = ('r', None, 'replaced by non-standin')

     return actions, diverge, renamedelete

 @eh.wrapfunction(merge, 'recordupdates')
-def mergerecordupdates(orig, repo, actions, branchmerge):
+def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
     if 'lfmr' in actions:
         lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
         for lfile, args, msg in actions['lfmr']:
             # this should be executed before 'orig', to execute 'remove'
             # before all other actions
             repo.dirstate.remove(lfile)
             # make sure lfile doesn't get synclfdirstate'd as normal
             lfdirstate.add(lfile)
         lfdirstate.write()

-    return orig(repo, actions, branchmerge)
+    return orig(repo, actions, branchmerge, getfiledata)
530 | 530 | |
|
531 | 531 | # Override filemerge to prompt the user about how they wish to merge |
|
532 | 532 | # largefiles. This will handle identical edits without prompting the user. |
|
533 | 533 | @eh.wrapfunction(filemerge, '_filemerge') |
|
534 | 534 | def overridefilemerge(origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, |
|
535 | 535 | labels=None): |
|
536 | 536 | if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent(): |
|
537 | 537 | return origfn(premerge, repo, wctx, mynode, orig, fcd, fco, fca, |
|
538 | 538 | labels=labels) |
|
539 | 539 | |
|
540 | 540 | ahash = lfutil.readasstandin(fca).lower() |
|
541 | 541 | dhash = lfutil.readasstandin(fcd).lower() |
|
542 | 542 | ohash = lfutil.readasstandin(fco).lower() |
|
543 | 543 | if (ohash != ahash and |
|
544 | 544 | ohash != dhash and |
|
545 | 545 | (dhash == ahash or |
|
546 | 546 | repo.ui.promptchoice( |
|
547 | 547 | _('largefile %s has a merge conflict\nancestor was %s\n' |
|
548 | 548 | 'keep (l)ocal %s or\ntake (o)ther %s?' |
|
549 | 549 | '$$ &Local $$ &Other') % |
|
550 | 550 | (lfutil.splitstandin(orig), ahash, dhash, ohash), |
|
551 | 551 | 0) == 1)): |
|
552 | 552 | repo.wwrite(fcd.path(), fco.data(), fco.flags()) |
|
553 | 553 | return True, 0, False |
|
554 | 554 | |
|
555 | 555 | @eh.wrapfunction(copiesmod, 'pathcopies') |
|
556 | 556 | def copiespathcopies(orig, ctx1, ctx2, match=None): |
|
557 | 557 | copies = orig(ctx1, ctx2, match=match) |
|
558 | 558 | updated = {} |
|
559 | 559 | |
|
560 | 560 | for k, v in copies.iteritems(): |
|
561 | 561 | updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v |
|
562 | 562 | |
|
563 | 563 | return updated |
|
564 | 564 | |
|
565 | 565 | # Copy first changes the matchers to match standins instead of |
|
566 | 566 | # largefiles. Then it overrides util.copyfile in that function it |
|
567 | 567 | # checks if the destination largefile already exists. It also keeps a |
|
568 | 568 | # list of copied files so that the largefiles can be copied and the |
|
569 | 569 | # dirstate updated. |
|
570 | 570 | @eh.wrapfunction(cmdutil, 'copy') |
|
571 | 571 | def overridecopy(orig, ui, repo, pats, opts, rename=False): |
|
572 | 572 | # doesn't remove largefile on rename |
|
573 | 573 | if len(pats) < 2: |
|
574 | 574 | # this isn't legal, let the original function deal with it |
|
575 | 575 | return orig(ui, repo, pats, opts, rename) |
|
576 | 576 | |
|
577 | 577 | # This could copy both lfiles and normal files in one command, |
|
578 | 578 | # but we don't want to do that. First replace their matcher to |
|
579 | 579 | # only match normal files and run it, then replace it to just |
|
580 | 580 | # match largefiles and run it again. |
|
581 | 581 | nonormalfiles = False |
|
582 | 582 | nolfiles = False |
|
583 | 583 | manifest = repo[None].manifest() |
|
584 | 584 | def normalfilesmatchfn(orig, ctx, pats=(), opts=None, globbed=False, |
|
585 | 585 | default='relpath', badfn=None): |
|
586 | 586 | if opts is None: |
|
587 | 587 | opts = {} |
|
588 | 588 | match = orig(ctx, pats, opts, globbed, default, badfn=badfn) |
|
589 | 589 | return composenormalfilematcher(match, manifest) |
|
590 | 590 | with extensions.wrappedfunction(scmutil, 'match', normalfilesmatchfn): |
|
591 | 591 | try: |
|
592 | 592 | result = orig(ui, repo, pats, opts, rename) |
|
593 | 593 | except error.Abort as e: |
|
594 | 594 | if pycompat.bytestr(e) != _('no files to copy'): |
|
595 | 595 | raise e |
|
596 | 596 | else: |
|
597 | 597 | nonormalfiles = True |
|
598 | 598 | result = 0 |
|
599 | 599 | |
|
600 | 600 | # The first rename can cause our current working directory to be removed. |
|
601 | 601 | # In that case there is nothing left to copy/rename so just quit. |
|
602 | 602 | try: |
|
603 | 603 | repo.getcwd() |
|
604 | 604 | except OSError: |
|
605 | 605 | return result |
|
606 | 606 | |
|
607 | 607 | def makestandin(relpath): |
|
608 | 608 | path = pathutil.canonpath(repo.root, repo.getcwd(), relpath) |
|
609 | 609 | return repo.wvfs.join(lfutil.standin(path)) |
|
610 | 610 | |
|
611 | 611 | fullpats = scmutil.expandpats(pats) |
|
612 | 612 | dest = fullpats[-1] |
|
613 | 613 | |
|
614 | 614 | if os.path.isdir(dest): |
|
615 | 615 | if not os.path.isdir(makestandin(dest)): |
|
616 | 616 | os.makedirs(makestandin(dest)) |
|
617 | 617 | |
|
618 | 618 | try: |
|
619 | 619 | # When we call orig below it creates the standins but we don't add |
|
620 | 620 | # them to the dir state until later so lock during that time. |
|
621 | 621 | wlock = repo.wlock() |
|
622 | 622 | |
|
623 | 623 | manifest = repo[None].manifest() |
|
624 | 624 | def overridematch(orig, ctx, pats=(), opts=None, globbed=False, |
|
625 | 625 | default='relpath', badfn=None): |
|
626 | 626 | if opts is None: |
|
627 | 627 | opts = {} |
|
628 | 628 | newpats = [] |
|
629 | 629 | # The patterns were previously mangled to add the standin |
|
630 | 630 | # directory; we need to remove that now |
|
631 | 631 | for pat in pats: |
|
632 | 632 | if matchmod.patkind(pat) is None and lfutil.shortname in pat: |
|
633 | 633 | newpats.append(pat.replace(lfutil.shortname, '')) |
|
634 | 634 | else: |
|
635 | 635 | newpats.append(pat) |
|
636 | 636 | match = orig(ctx, newpats, opts, globbed, default, badfn=badfn) |
|
637 | 637 | m = copy.copy(match) |
|
638 | 638 | lfile = lambda f: lfutil.standin(f) in manifest |
|
639 | 639 | m._files = [lfutil.standin(f) for f in m._files if lfile(f)] |
|
640 | 640 | m._fileset = set(m._files) |
|
641 | 641 | origmatchfn = m.matchfn |
|
642 | 642 | def matchfn(f): |
|
643 | 643 | lfile = lfutil.splitstandin(f) |
|
644 | 644 | return (lfile is not None and |
|
645 | 645 | (f in manifest) and |
|
646 | 646 | origmatchfn(lfile) or |
|
647 | 647 | None) |
|
648 | 648 | m.matchfn = matchfn |
|
649 | 649 | return m |
|
650 | 650 | listpats = [] |
|
651 | 651 | for pat in pats: |
|
652 | 652 | if matchmod.patkind(pat) is not None: |
|
653 | 653 | listpats.append(pat) |
|
654 | 654 | else: |
|
655 | 655 | listpats.append(makestandin(pat)) |
|
656 | 656 | |
|
657 | 657 | copiedfiles = [] |
|
658 | 658 | def overridecopyfile(orig, src, dest, *args, **kwargs): |
|
659 | 659 | if (lfutil.shortname in src and |
|
660 | 660 | dest.startswith(repo.wjoin(lfutil.shortname))): |
|
661 | 661 | destlfile = dest.replace(lfutil.shortname, '') |
|
662 | 662 | if not opts['force'] and os.path.exists(destlfile): |
|
663 | 663 | raise IOError('', |
|
664 | 664 | _('destination largefile already exists')) |
|
665 | 665 | copiedfiles.append((src, dest)) |
|
666 | 666 | orig(src, dest, *args, **kwargs) |
|
667 | 667 | with extensions.wrappedfunction(util, 'copyfile', overridecopyfile): |
|
668 | 668 | with extensions.wrappedfunction(scmutil, 'match', overridematch): |
|
669 | 669 | result += orig(ui, repo, listpats, opts, rename) |
|
670 | 670 | |
|
671 | 671 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
672 | 672 | for (src, dest) in copiedfiles: |
|
673 | 673 | if (lfutil.shortname in src and |
|
674 | 674 | dest.startswith(repo.wjoin(lfutil.shortname))): |
|
675 | 675 | srclfile = src.replace(repo.wjoin(lfutil.standin('')), '') |
|
676 | 676 | destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '') |
|
677 | 677 | destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.' |
|
678 | 678 | if not os.path.isdir(destlfiledir): |
|
679 | 679 | os.makedirs(destlfiledir) |
|
680 | 680 | if rename: |
|
681 | 681 | os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile)) |
|
682 | 682 | |
|
683 | 683 | # The file is gone, but this deletes any empty parent |
|
684 | 684 | # directories as a side-effect. |
|
685 | 685 | repo.wvfs.unlinkpath(srclfile, ignoremissing=True) |
|
686 | 686 | lfdirstate.remove(srclfile) |
|
687 | 687 | else: |
|
688 | 688 | util.copyfile(repo.wjoin(srclfile), |
|
689 | 689 | repo.wjoin(destlfile)) |
|
690 | 690 | |
|
691 | 691 | lfdirstate.add(destlfile) |
|
692 | 692 | lfdirstate.write() |
|
693 | 693 | except error.Abort as e: |
|
694 | 694 | if pycompat.bytestr(e) != _('no files to copy'): |
|
695 | 695 | raise e |
|
696 | 696 | else: |
|
697 | 697 | nolfiles = True |
|
698 | 698 | finally: |
|
699 | 699 | wlock.release() |
|
700 | 700 | |
|
701 | 701 | if nolfiles and nonormalfiles: |
|
702 | 702 | raise error.Abort(_('no files to copy')) |
|
703 | 703 | |
|
704 | 704 | return result |
|
705 | 705 | |
|
706 | 706 | # When the user calls revert, we have to be careful to not revert any |
|
707 | 707 | # changes to other largefiles accidentally. This means we have to keep |
|
708 | 708 | # track of the largefiles that are being reverted so we only pull down |
|
709 | 709 | # the necessary largefiles. |
|
710 | 710 | # |
|
711 | 711 | # Standins are only updated (to match the hash of largefiles) before |
|
712 | 712 | # commits. Update the standins then run the original revert, changing |
|
713 | 713 | # the matcher to hit standins instead of largefiles. Based on the |
|
714 | 714 | # resulting standins update the largefiles. |
|
715 | 715 | @eh.wrapfunction(cmdutil, 'revert') |
|
716 | 716 | def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts): |
|
717 | 717 | # Because we put the standins in a bad state (by updating them) |
|
718 | 718 | # and then return them to a correct state we need to lock to |
|
719 | 719 | # prevent others from changing them in their incorrect state. |
|
720 | 720 | with repo.wlock(): |
|
721 | 721 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
722 | 722 | s = lfutil.lfdirstatestatus(lfdirstate, repo) |
|
723 | 723 | lfdirstate.write() |
|
724 | 724 | for lfile in s.modified: |
|
725 | 725 | lfutil.updatestandin(repo, lfile, lfutil.standin(lfile)) |
|
726 | 726 | for lfile in s.deleted: |
|
727 | 727 | fstandin = lfutil.standin(lfile) |
|
728 | 728 | if (repo.wvfs.exists(fstandin)): |
|
729 | 729 | repo.wvfs.unlink(fstandin) |
|
730 | 730 | |
|
731 | 731 | oldstandins = lfutil.getstandinsstate(repo) |
|
732 | 732 | |
|
733 | 733 | def overridematch(orig, mctx, pats=(), opts=None, globbed=False, |
|
734 | 734 | default='relpath', badfn=None): |
|
735 | 735 | if opts is None: |
|
736 | 736 | opts = {} |
|
737 | 737 | match = orig(mctx, pats, opts, globbed, default, badfn=badfn) |
|
738 | 738 | m = copy.copy(match) |
|
739 | 739 | |
|
740 | 740 | # revert supports recursing into subrepos, and though largefiles |
|
741 | 741 | # currently doesn't work correctly in that case, this match is |
|
742 | 742 | # called, so the lfdirstate above may not be the correct one for |
|
743 | 743 | # this invocation of match. |
|
744 | 744 | lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(), |
|
745 | 745 | False) |
|
746 | 746 | |
|
747 | 747 | wctx = repo[None] |
|
748 | 748 | matchfiles = [] |
|
749 | 749 | for f in m._files: |
|
750 | 750 | standin = lfutil.standin(f) |
|
751 | 751 | if standin in ctx or standin in mctx: |
|
752 | 752 | matchfiles.append(standin) |
|
753 | 753 | elif standin in wctx or lfdirstate[f] == 'r': |
|
754 | 754 | continue |
|
755 | 755 | else: |
|
756 | 756 | matchfiles.append(f) |
|
757 | 757 | m._files = matchfiles |
|
758 | 758 | m._fileset = set(m._files) |
|
759 | 759 | origmatchfn = m.matchfn |
|
760 | 760 | def matchfn(f): |
|
761 | 761 | lfile = lfutil.splitstandin(f) |
|
762 | 762 | if lfile is not None: |
|
763 | 763 | return (origmatchfn(lfile) and |
|
764 | 764 | (f in ctx or f in mctx)) |
|
765 | 765 | return origmatchfn(f) |
|
766 | 766 | m.matchfn = matchfn |
|
767 | 767 | return m |
|
768 | 768 | with extensions.wrappedfunction(scmutil, 'match', overridematch): |
|
769 | 769 | orig(ui, repo, ctx, parents, *pats, **opts) |
|
770 | 770 | |
|
771 | 771 | newstandins = lfutil.getstandinsstate(repo) |
|
772 | 772 | filelist = lfutil.getlfilestoupdate(oldstandins, newstandins) |
|
773 | 773 | # lfdirstate should be 'normallookup'-ed for updated files, |
|
774 | 774 | # because reverting doesn't touch dirstate for 'normal' files |
|
775 | 775 | # when target revision is explicitly specified: in such case, |
|
776 | 776 | # 'n' and valid timestamp in dirstate doesn't ensure 'clean' |
|
777 | 777 | # of target (standin) file. |
|
778 | 778 | lfcommands.updatelfiles(ui, repo, filelist, printmessage=False, |
|
779 | 779 | normallookup=True) |
|
780 | 780 | |
|
781 | 781 | # after pulling changesets, we need to take some extra care to get |
|
782 | 782 | # largefiles updated remotely |
|
783 | 783 | @eh.wrapcommand('pull', |
|
784 | 784 | opts=[('', 'all-largefiles', None, |
|
785 | 785 | _('download all pulled versions of largefiles (DEPRECATED)')), |
|
786 | 786 | ('', 'lfrev', [], |
|
787 | 787 | _('download largefiles for these revisions'), _('REV'))]) |
|
788 | 788 | def overridepull(orig, ui, repo, source=None, **opts): |
|
789 | 789 | revsprepull = len(repo) |
|
790 | 790 | if not source: |
|
791 | 791 | source = 'default' |
|
792 | 792 | repo.lfpullsource = source |
|
793 | 793 | result = orig(ui, repo, source, **opts) |
|
794 | 794 | revspostpull = len(repo) |
|
795 | 795 | lfrevs = opts.get(r'lfrev', []) |
|
796 | 796 | if opts.get(r'all_largefiles'): |
|
797 | 797 | lfrevs.append('pulled()') |
|
798 | 798 | if lfrevs and revspostpull > revsprepull: |
|
799 | 799 | numcached = 0 |
|
800 | 800 | repo.firstpulled = revsprepull # for pulled() revset expression |
|
801 | 801 | try: |
|
802 | 802 | for rev in scmutil.revrange(repo, lfrevs): |
|
803 | 803 | ui.note(_('pulling largefiles for revision %d\n') % rev) |
|
804 | 804 | (cached, missing) = lfcommands.cachelfiles(ui, repo, rev) |
|
805 | 805 | numcached += len(cached) |
|
806 | 806 | finally: |
|
807 | 807 | del repo.firstpulled |
|
808 | 808 | ui.status(_("%d largefiles cached\n") % numcached) |
|
809 | 809 | return result |
|
810 | 810 | |
|
811 | 811 | @eh.wrapcommand('push', |
|
812 | 812 | opts=[('', 'lfrev', [], |
|
813 | 813 | _('upload largefiles for these revisions'), _('REV'))]) |
|
814 | 814 | def overridepush(orig, ui, repo, *args, **kwargs): |
|
815 | 815 | """Override push command and store --lfrev parameters in opargs""" |
|
816 | 816 | lfrevs = kwargs.pop(r'lfrev', None) |
|
817 | 817 | if lfrevs: |
|
818 | 818 | opargs = kwargs.setdefault(r'opargs', {}) |
|
819 | 819 | opargs['lfrevs'] = scmutil.revrange(repo, lfrevs) |
|
820 | 820 | return orig(ui, repo, *args, **kwargs) |
|
821 | 821 | |
|
822 | 822 | @eh.wrapfunction(exchange, 'pushoperation') |
|
823 | 823 | def exchangepushoperation(orig, *args, **kwargs): |
|
824 | 824 | """Override pushoperation constructor and store lfrevs parameter""" |
|
825 | 825 | lfrevs = kwargs.pop(r'lfrevs', None) |
|
826 | 826 | pushop = orig(*args, **kwargs) |
|
827 | 827 | pushop.lfrevs = lfrevs |
|
828 | 828 | return pushop |
|
829 | 829 | |
|
830 | 830 | @eh.revsetpredicate('pulled()') |
|
831 | 831 | def pulledrevsetsymbol(repo, subset, x): |
|
832 | 832 | """Changesets that just has been pulled. |
|
833 | 833 | |
|
834 | 834 | Only available with largefiles from pull --lfrev expressions. |
|
835 | 835 | |
|
836 | 836 | .. container:: verbose |
|
837 | 837 | |
|
838 | 838 | Some examples: |
|
839 | 839 | |
|
840 | 840 | - pull largefiles for all new changesets:: |
|
841 | 841 | |
|
842 | 842 | hg pull -lfrev "pulled()" |
|
843 | 843 | |
|
844 | 844 | - pull largefiles for all new branch heads:: |
|
845 | 845 | |
|
846 | 846 | hg pull -lfrev "head(pulled()) and not closed()" |
|
847 | 847 | |
|
848 | 848 | """ |
|
849 | 849 | |
|
850 | 850 | try: |
|
851 | 851 | firstpulled = repo.firstpulled |
|
852 | 852 | except AttributeError: |
|
853 | 853 | raise error.Abort(_("pulled() only available in --lfrev")) |
|
854 | 854 | return smartset.baseset([r for r in subset if r >= firstpulled]) |
|
855 | 855 | |
|
856 | 856 | @eh.wrapcommand('clone', |
|
857 | 857 | opts=[('', 'all-largefiles', None, |
|
858 | 858 | _('download all versions of all largefiles'))]) |
|
859 | 859 | def overrideclone(orig, ui, source, dest=None, **opts): |
|
860 | 860 | d = dest |
|
861 | 861 | if d is None: |
|
862 | 862 | d = hg.defaultdest(source) |
|
863 | 863 | if opts.get(r'all_largefiles') and not hg.islocal(d): |
|
864 | 864 | raise error.Abort(_( |
|
865 | 865 | '--all-largefiles is incompatible with non-local destination %s') % |
|
866 | 866 | d) |
|
867 | 867 | |
|
868 | 868 | return orig(ui, source, dest, **opts) |
|
869 | 869 | |
|
870 | 870 | @eh.wrapfunction(hg, 'clone') |
|
871 | 871 | def hgclone(orig, ui, opts, *args, **kwargs): |
|
872 | 872 | result = orig(ui, opts, *args, **kwargs) |
|
873 | 873 | |
|
874 | 874 | if result is not None: |
|
875 | 875 | sourcerepo, destrepo = result |
|
876 | 876 | repo = destrepo.local() |
|
877 | 877 | |
|
878 | 878 | # When cloning to a remote repo (like through SSH), no repo is available |
|
879 | 879 | # from the peer. Therefore the largefiles can't be downloaded and the |
|
880 | 880 | # hgrc can't be updated. |
|
881 | 881 | if not repo: |
|
882 | 882 | return result |
|
883 | 883 | |
|
884 | 884 | # Caching is implicitly limited to 'rev' option, since the dest repo was |
|
885 | 885 | # truncated at that point. The user may expect a download count with |
|
886 | 886 | # this option, so attempt whether or not this is a largefile repo. |
|
887 | 887 | if opts.get('all_largefiles'): |
|
888 | 888 | success, missing = lfcommands.downloadlfiles(ui, repo, None) |
|
889 | 889 | |
|
890 | 890 | if missing != 0: |
|
891 | 891 | return None |
|
892 | 892 | |
|
893 | 893 | return result |
|
894 | 894 | |
|
895 | 895 | @eh.wrapcommand('rebase', extension='rebase') |
|
896 | 896 | def overriderebase(orig, ui, repo, **opts): |
|
897 | 897 | if not util.safehasattr(repo, '_largefilesenabled'): |
|
898 | 898 | return orig(ui, repo, **opts) |
|
899 | 899 | |
|
900 | 900 | resuming = opts.get(r'continue') |
|
901 | 901 | repo._lfcommithooks.append(lfutil.automatedcommithook(resuming)) |
|
902 | 902 | repo._lfstatuswriters.append(lambda *msg, **opts: None) |
|
903 | 903 | try: |
|
904 | 904 | return orig(ui, repo, **opts) |
|
905 | 905 | finally: |
|
906 | 906 | repo._lfstatuswriters.pop() |
|
907 | 907 | repo._lfcommithooks.pop() |
|
908 | 908 | |
|
909 | 909 | @eh.wrapcommand('archive') |
|
910 | 910 | def overridearchivecmd(orig, ui, repo, dest, **opts): |
|
911 | 911 | repo.unfiltered().lfstatus = True |
|
912 | 912 | |
|
913 | 913 | try: |
|
914 | 914 | return orig(ui, repo.unfiltered(), dest, **opts) |
|
915 | 915 | finally: |
|
916 | 916 | repo.unfiltered().lfstatus = False |
|
917 | 917 | |
|
918 | 918 | @eh.wrapfunction(webcommands, 'archive') |
|
919 | 919 | def hgwebarchive(orig, web): |
|
920 | 920 | web.repo.lfstatus = True |
|
921 | 921 | |
|
922 | 922 | try: |
|
923 | 923 | return orig(web) |
|
924 | 924 | finally: |
|
925 | 925 | web.repo.lfstatus = False |
|
926 | 926 | |
|
927 | 927 | @eh.wrapfunction(archival, 'archive') |
|
928 | 928 | def overridearchive(orig, repo, dest, node, kind, decode=True, match=None, |
|
929 | 929 | prefix='', mtime=None, subrepos=None): |
|
930 | 930 | # For some reason setting repo.lfstatus in hgwebarchive only changes the |
|
931 | 931 | # unfiltered repo's attr, so check that as well. |
|
932 | 932 | if not repo.lfstatus and not repo.unfiltered().lfstatus: |
|
933 | 933 | return orig(repo, dest, node, kind, decode, match, prefix, mtime, |
|
934 | 934 | subrepos) |
|
935 | 935 | |
|
936 | 936 | # No need to lock because we are only reading history and |
|
937 | 937 | # largefile caches, neither of which are modified. |
|
938 | 938 | if node is not None: |
|
939 | 939 | lfcommands.cachelfiles(repo.ui, repo, node) |
|
940 | 940 | |
|
941 | 941 | if kind not in archival.archivers: |
|
942 | 942 | raise error.Abort(_("unknown archive type '%s'") % kind) |
|
943 | 943 | |
|
944 | 944 | ctx = repo[node] |
|
945 | 945 | |
|
946 | 946 | if kind == 'files': |
|
947 | 947 | if prefix: |
|
948 | 948 | raise error.Abort( |
|
949 | 949 | _('cannot give prefix when archiving to files')) |
|
950 | 950 | else: |
|
951 | 951 | prefix = archival.tidyprefix(dest, kind, prefix) |
|
952 | 952 | |
|
953 | 953 | def write(name, mode, islink, getdata): |
|
954 | 954 | if match and not match(name): |
|
955 | 955 | return |
|
956 | 956 | data = getdata() |
|
957 | 957 | if decode: |
|
958 | 958 | data = repo.wwritedata(name, data) |
|
959 | 959 | archiver.addfile(prefix + name, mode, islink, data) |
|
960 | 960 | |
|
961 | 961 | archiver = archival.archivers[kind](dest, mtime or ctx.date()[0]) |
|
962 | 962 | |
|
963 | 963 | if repo.ui.configbool("ui", "archivemeta"): |
|
964 | 964 | write('.hg_archival.txt', 0o644, False, |
|
965 | 965 | lambda: archival.buildmetadata(ctx)) |
|
966 | 966 | |
|
967 | 967 | for f in ctx: |
|
968 | 968 | ff = ctx.flags(f) |
|
969 | 969 | getdata = ctx[f].data |
|
970 | 970 | lfile = lfutil.splitstandin(f) |
|
971 | 971 | if lfile is not None: |
|
972 | 972 | if node is not None: |
|
973 | 973 | path = lfutil.findfile(repo, getdata().strip()) |
|
974 | 974 | |
|
975 | 975 | if path is None: |
|
976 | 976 | raise error.Abort( |
|
977 | 977 | _('largefile %s not found in repo store or system cache') |
|
978 | 978 | % lfile) |
|
979 | 979 | else: |
|
980 | 980 | path = lfile |
|
981 | 981 | |
|
982 | 982 | f = lfile |
|
983 | 983 | |
|
984 | 984 | getdata = lambda: util.readfile(path) |
|
985 | 985 | write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata) |
|
986 | 986 | |
|
987 | 987 | if subrepos: |
|
988 | 988 | for subpath in sorted(ctx.substate): |
|
989 | 989 | sub = ctx.workingsub(subpath) |
|
990 | 990 | submatch = matchmod.subdirmatcher(subpath, match) |
|
991 | 991 | subprefix = prefix + subpath + '/' |
|
992 | 992 | sub._repo.lfstatus = True |
|
993 | 993 | sub.archive(archiver, subprefix, submatch) |
|
994 | 994 | |
|
995 | 995 | archiver.done() |
|
996 | 996 | |
|
997 | 997 | @eh.wrapfunction(subrepo.hgsubrepo, 'archive') |
|
998 | 998 | def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True): |
|
999 | 999 | lfenabled = util.safehasattr(repo._repo, '_largefilesenabled') |
|
1000 | 1000 | if not lfenabled or not repo._repo.lfstatus: |
|
1001 | 1001 | return orig(repo, archiver, prefix, match, decode) |
|
1002 | 1002 | |
|
1003 | 1003 | repo._get(repo._state + ('hg',)) |
|
1004 | 1004 | rev = repo._state[1] |
|
1005 | 1005 | ctx = repo._repo[rev] |
|
1006 | 1006 | |
|
1007 | 1007 | if ctx.node() is not None: |
|
1008 | 1008 | lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node()) |
|
1009 | 1009 | |
|
1010 | 1010 | def write(name, mode, islink, getdata): |
|
1011 | 1011 | # At this point, the standin has been replaced with the largefile name, |
|
1012 | 1012 | # so the normal matcher works here without the lfutil variants. |
|
1013 | 1013 | if match and not match(f): |
|
1014 | 1014 | return |
|
1015 | 1015 | data = getdata() |
|
1016 | 1016 | if decode: |
|
1017 | 1017 | data = repo._repo.wwritedata(name, data) |
|
1018 | 1018 | |
|
1019 | 1019 | archiver.addfile(prefix + name, mode, islink, data) |
|
1020 | 1020 | |
|
1021 | 1021 | for f in ctx: |
|
1022 | 1022 | ff = ctx.flags(f) |
|
1023 | 1023 | getdata = ctx[f].data |
|
1024 | 1024 | lfile = lfutil.splitstandin(f) |
|
1025 | 1025 | if lfile is not None: |
|
1026 | 1026 | if ctx.node() is not None: |
|
1027 | 1027 | path = lfutil.findfile(repo._repo, getdata().strip()) |
|
1028 | 1028 | |
|
1029 | 1029 | if path is None: |
|
1030 | 1030 | raise error.Abort( |
|
1031 | 1031 | _('largefile %s not found in repo store or system cache') |
|
1032 | 1032 | % lfile) |
|
1033 | 1033 | else: |
|
1034 | 1034 | path = lfile |
|
1035 | 1035 | |
|
1036 | 1036 | f = lfile |
|
1037 | 1037 | |
|
1038 | 1038 | getdata = lambda: util.readfile(os.path.join(prefix, path)) |
|
1039 | 1039 | |
|
1040 | 1040 | write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata) |
|
1041 | 1041 | |
|
1042 | 1042 | for subpath in sorted(ctx.substate): |
|
1043 | 1043 | sub = ctx.workingsub(subpath) |
|
1044 | 1044 | submatch = matchmod.subdirmatcher(subpath, match) |
|
1045 | 1045 | subprefix = prefix + subpath + '/' |
|
1046 | 1046 | sub._repo.lfstatus = True |
|
1047 | 1047 | sub.archive(archiver, subprefix, submatch, decode) |
|
1048 | 1048 | |
|
1049 | 1049 | # If a largefile is modified, the change is not reflected in its |
|
1050 | 1050 | # standin until a commit. cmdutil.bailifchanged() raises an exception |
|
1051 | 1051 | # if the repo has uncommitted changes. Wrap it to also check if |
|
1052 | 1052 | # largefiles were changed. This is used by bisect, backout and fetch. |
|
1053 | 1053 | @eh.wrapfunction(cmdutil, 'bailifchanged') |
|
1054 | 1054 | def overridebailifchanged(orig, repo, *args, **kwargs): |
|
1055 | 1055 | orig(repo, *args, **kwargs) |
|
1056 | 1056 | repo.lfstatus = True |
|
1057 | 1057 | s = repo.status() |
|
1058 | 1058 | repo.lfstatus = False |
|
1059 | 1059 | if s.modified or s.added or s.removed or s.deleted: |
|
1060 | 1060 | raise error.Abort(_('uncommitted changes')) |
|
1061 | 1061 | |
|
1062 | 1062 | @eh.wrapfunction(cmdutil, 'postcommitstatus') |
|
1063 | 1063 | def postcommitstatus(orig, repo, *args, **kwargs): |
|
1064 | 1064 | repo.lfstatus = True |
|
1065 | 1065 | try: |
|
1066 | 1066 | return orig(repo, *args, **kwargs) |
|
1067 | 1067 | finally: |
|
1068 | 1068 | repo.lfstatus = False |
|
1069 | 1069 | |
|
1070 | 1070 | @eh.wrapfunction(cmdutil, 'forget') |
|
1071 | 1071 | def cmdutilforget(orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, |
|
1072 | 1072 | interactive): |
|
1073 | 1073 | normalmatcher = composenormalfilematcher(match, repo[None].manifest()) |
|
1074 | 1074 | bad, forgot = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, |
|
1075 | 1075 | dryrun, interactive) |
|
1076 | 1076 | m = composelargefilematcher(match, repo[None].manifest()) |
|
1077 | 1077 | |
|
1078 | 1078 | try: |
|
1079 | 1079 | repo.lfstatus = True |
|
1080 | 1080 | s = repo.status(match=m, clean=True) |
|
1081 | 1081 | finally: |
|
1082 | 1082 | repo.lfstatus = False |
|
1083 | 1083 | manifest = repo[None].manifest() |
|
1084 | 1084 | forget = sorted(s.modified + s.added + s.deleted + s.clean) |
|
1085 | 1085 | forget = [f for f in forget if lfutil.standin(f) in manifest] |
|
1086 | 1086 | |
|
1087 | 1087 | for f in forget: |
|
1088 | 1088 | fstandin = lfutil.standin(f) |
|
1089 | 1089 | if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin): |
|
1090 | 1090 | ui.warn(_('not removing %s: file is already untracked\n') |
|
1091 | 1091 | % uipathfn(f)) |
|
1092 | 1092 | bad.append(f) |
|
1093 | 1093 | |
|
1094 | 1094 | for f in forget: |
|
1095 | 1095 | if ui.verbose or not m.exact(f): |
|
1096 | 1096 | ui.status(_('removing %s\n') % uipathfn(f)) |
|
1097 | 1097 | |
|
1098 | 1098 | # Need to lock because standin files are deleted then removed from the |
|
1099 | 1099 | # repository and we could race in-between. |
|
1100 | 1100 | with repo.wlock(): |
|
1101 | 1101 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
1102 | 1102 | for f in forget: |
|
1103 | 1103 | if lfdirstate[f] == 'a': |
|
1104 | 1104 | lfdirstate.drop(f) |
|
1105 | 1105 | else: |
|
1106 | 1106 | lfdirstate.remove(f) |
|
1107 | 1107 | lfdirstate.write() |
|
1108 | 1108 | standins = [lfutil.standin(f) for f in forget] |
|
1109 | 1109 | for f in standins: |
|
1110 | 1110 | repo.wvfs.unlinkpath(f, ignoremissing=True) |
|
1111 | 1111 | rejected = repo[None].forget(standins) |
|
1112 | 1112 | |
|
1113 | 1113 | bad.extend(f for f in rejected if f in m.files()) |
|
1114 | 1114 | forgot.extend(f for f in forget if f not in rejected) |
|
1115 | 1115 | return bad, forgot |
|
1116 | 1116 | |
|
1117 | 1117 | def _getoutgoings(repo, other, missing, addfunc): |
|
1118 | 1118 | """get pairs of filename and largefile hash in outgoing revisions |
|
1119 | 1119 | in 'missing'. |
|
1120 | 1120 | |
|
1121 | 1121 | largefiles already existing on 'other' repository are ignored. |
|
1122 | 1122 | |
|
1123 | 1123 | 'addfunc' is invoked with each unique pairs of filename and |
|
1124 | 1124 | largefile hash value. |
|
1125 | 1125 | """ |
|
1126 | 1126 | knowns = set() |
|
1127 | 1127 | lfhashes = set() |
|
1128 | 1128 | def dedup(fn, lfhash): |
|
1129 | 1129 | k = (fn, lfhash) |
|
1130 | 1130 | if k not in knowns: |
|
1131 | 1131 | knowns.add(k) |
|
1132 | 1132 | lfhashes.add(lfhash) |
|
1133 | 1133 | lfutil.getlfilestoupload(repo, missing, dedup) |
|
1134 | 1134 | if lfhashes: |
|
1135 | 1135 | lfexists = storefactory.openstore(repo, other).exists(lfhashes) |
|
1136 | 1136 | for fn, lfhash in knowns: |
|
1137 | 1137 | if not lfexists[lfhash]: # lfhash doesn't exist on "other" |
|
1138 | 1138 | addfunc(fn, lfhash) |
|
1139 | 1139 | |
|
1140 | 1140 | def outgoinghook(ui, repo, other, opts, missing): |
|
1141 | 1141 | if opts.pop('large', None): |
|
1142 | 1142 | lfhashes = set() |
|
1143 | 1143 | if ui.debugflag: |
|
1144 | 1144 | toupload = {} |
|
1145 | 1145 | def addfunc(fn, lfhash): |
|
1146 | 1146 | if fn not in toupload: |
|
1147 | 1147 | toupload[fn] = [] |
|
1148 | 1148 | toupload[fn].append(lfhash) |
|
1149 | 1149 | lfhashes.add(lfhash) |
|
1150 | 1150 | def showhashes(fn): |
|
1151 | 1151 | for lfhash in sorted(toupload[fn]): |
|
1152 | 1152 | ui.debug(' %s\n' % (lfhash)) |
|
1153 | 1153 | else: |
|
1154 | 1154 | toupload = set() |
|
1155 | 1155 | def addfunc(fn, lfhash): |
|
1156 | 1156 | toupload.add(fn) |
|
1157 | 1157 | lfhashes.add(lfhash) |
|
1158 | 1158 | def showhashes(fn): |
|
1159 | 1159 | pass |
|
1160 | 1160 | _getoutgoings(repo, other, missing, addfunc) |
|
1161 | 1161 | |
|
1162 | 1162 | if not toupload: |
|
1163 | 1163 | ui.status(_('largefiles: no files to upload\n')) |
|
1164 | 1164 | else: |
|
1165 | 1165 | ui.status(_('largefiles to upload (%d entities):\n') |
|
1166 | 1166 | % (len(lfhashes))) |
|
1167 | 1167 | for file in sorted(toupload): |
|
1168 | 1168 | ui.status(lfutil.splitstandin(file) + '\n') |
|
1169 | 1169 | showhashes(file) |
|
1170 | 1170 | ui.status('\n') |
|
1171 | 1171 | |
|
1172 | 1172 | @eh.wrapcommand('outgoing', |
|
1173 | 1173 | opts=[('', 'large', None, _('display outgoing largefiles'))]) |
|
1174 | 1174 | def _outgoingcmd(orig, *args, **kwargs): |
|
1175 | 1175 | # Nothing to do here other than add the extra help option- the hook above |
|
1176 | 1176 | # processes it. |
|
1177 | 1177 | return orig(*args, **kwargs) |
|
1178 | 1178 | |
|
1179 | 1179 | def summaryremotehook(ui, repo, opts, changes): |
|
1180 | 1180 | largeopt = opts.get('large', False) |
|
1181 | 1181 | if changes is None: |
|
1182 | 1182 | if largeopt: |
|
1183 | 1183 | return (False, True) # only outgoing check is needed |
|
1184 | 1184 | else: |
|
1185 | 1185 | return (False, False) |
|
1186 | 1186 | elif largeopt: |
|
1187 | 1187 | url, branch, peer, outgoing = changes[1] |
|
1188 | 1188 | if peer is None: |
|
1189 | 1189 | # i18n: column positioning for "hg summary" |
|
1190 | 1190 | ui.status(_('largefiles: (no remote repo)\n')) |
|
1191 | 1191 | return |
|
1192 | 1192 | |
|
1193 | 1193 | toupload = set() |
|
1194 | 1194 | lfhashes = set() |
|
1195 | 1195 | def addfunc(fn, lfhash): |
|
1196 | 1196 | toupload.add(fn) |
|
1197 | 1197 | lfhashes.add(lfhash) |
|
1198 | 1198 | _getoutgoings(repo, peer, outgoing.missing, addfunc) |
|
1199 | 1199 | |
|
1200 | 1200 | if not toupload: |
|
1201 | 1201 | # i18n: column positioning for "hg summary" |
|
1202 | 1202 | ui.status(_('largefiles: (no files to upload)\n')) |
|
1203 | 1203 | else: |
|
1204 | 1204 | # i18n: column positioning for "hg summary" |
|
1205 | 1205 | ui.status(_('largefiles: %d entities for %d files to upload\n') |
|
1206 | 1206 | % (len(lfhashes), len(toupload))) |
|
1207 | 1207 | |
|
1208 | 1208 | @eh.wrapcommand('summary', |
|
1209 | 1209 | opts=[('', 'large', None, _('display outgoing largefiles'))]) |
|
1210 | 1210 | def overridesummary(orig, ui, repo, *pats, **opts): |
|
1211 | 1211 | try: |
|
1212 | 1212 | repo.lfstatus = True |
|
1213 | 1213 | orig(ui, repo, *pats, **opts) |
|
1214 | 1214 | finally: |
|
1215 | 1215 | repo.lfstatus = False |
|
1216 | 1216 | |
|
1217 | 1217 | @eh.wrapfunction(scmutil, 'addremove') |
|
1218 | 1218 | def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None): |
|
1219 | 1219 | if opts is None: |
|
1220 | 1220 | opts = {} |
|
1221 | 1221 | if not lfutil.islfilesrepo(repo): |
|
1222 | 1222 | return orig(repo, matcher, prefix, uipathfn, opts) |
|
1223 | 1223 | # Get the list of missing largefiles so we can remove them |
|
1224 | 1224 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
1225 | 1225 | unsure, s = lfdirstate.status(matchmod.always(), subrepos=[], |
|
1226 | 1226 | ignored=False, clean=False, unknown=False) |
|
1227 | 1227 | |
|
1228 | 1228 | # Call into the normal remove code, but leave the removal of the standin

1229 | 1229 | # to the original addremove. Monkey patching here makes sure we don't

1230 | 1230 | # remove the standin in the largefiles code, preventing a very confused

1231 | 1231 | # state later.
|
1232 | 1232 | if s.deleted: |
|
1233 | 1233 | m = copy.copy(matcher) |
|
1234 | 1234 | |
|
1235 | 1235 | # The m._files and m._map attributes are not changed to the deleted list |
|
1236 | 1236 | # because that affects the m.exact() test, which in turn governs whether |
|
1237 | 1237 | # or not the file name is printed, and how. Simply limit the original |
|
1238 | 1238 | # matches to those in the deleted status list. |
|
1239 | 1239 | matchfn = m.matchfn |
|
1240 | 1240 | m.matchfn = lambda f: f in s.deleted and matchfn(f) |
|
1241 | 1241 | |
|
1242 | 1242 | removelargefiles(repo.ui, repo, True, m, uipathfn, opts.get('dry_run'), |
|
1243 | 1243 | **pycompat.strkwargs(opts)) |
|
1244 | 1244 | # Call into the normal add code, and any files that *should* be added as |
|
1245 | 1245 | # largefiles will be |
|
1246 | 1246 | added, bad = addlargefiles(repo.ui, repo, True, matcher, uipathfn, |
|
1247 | 1247 | **pycompat.strkwargs(opts)) |
|
1248 | 1248 | # Now that we've handled largefiles, hand off to the original addremove |
|
1249 | 1249 | # function to take care of the rest. Make sure it doesn't do anything with |
|
1250 | 1250 | # largefiles by passing a matcher that will ignore them. |
|
1251 | 1251 | matcher = composenormalfilematcher(matcher, repo[None].manifest(), added) |
|
1252 | 1252 | return orig(repo, matcher, prefix, uipathfn, opts) |
|
1253 | 1253 | |
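# The matchfn-wrapping trick used above, as a standalone sketch
# ('restrictmatcher' is illustrative, not part of this module). Wrapping
# matchfn instead of rewriting m._files keeps m.exact() - and therefore
# the way file names are printed - intact:
#
#   def restrictmatcher(m, allowed):
#       origmatchfn = m.matchfn
#       m.matchfn = lambda f: f in allowed and origmatchfn(f)
#       return m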
|
1254 | 1254 | # Calling purge with --all will cause the largefiles to be deleted. |
|
1255 | 1255 | # Override repo.status to prevent this from happening. |
|
1256 | 1256 | @eh.wrapcommand('purge', extension='purge') |
|
1257 | 1257 | def overridepurge(orig, ui, repo, *dirs, **opts): |
|
1258 | 1258 | # XXX Monkey patching a repoview will not work. The assigned attribute will |
|
1259 | 1259 | # be set on the unfiltered repo, but we will only lookup attributes in the |
|
1260 | 1260 | # unfiltered repo if the lookup in the repoview object itself fails. As the |
|
1261 | 1261 | # monkey patched method exists on the repoview class the lookup will not |
|
1262 | 1262 | # fail. As a result, the original version will shadow the monkey patched |
|
1263 | 1263 | # one, defeating the monkey patch. |
|
1264 | 1264 | # |
|
1265 | 1265 | # As a workaround we use an unfiltered repo here. We should do something
|
1266 | 1266 | # cleaner instead. |
|
1267 | 1267 | repo = repo.unfiltered() |
|
1268 | 1268 | oldstatus = repo.status |
|
1269 | 1269 | def overridestatus(node1='.', node2=None, match=None, ignored=False, |
|
1270 | 1270 | clean=False, unknown=False, listsubrepos=False): |
|
1271 | 1271 | r = oldstatus(node1, node2, match, ignored, clean, unknown, |
|
1272 | 1272 | listsubrepos) |
|
1273 | 1273 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
1274 | 1274 | unknown = [f for f in r.unknown if lfdirstate[f] == '?'] |
|
1275 | 1275 | ignored = [f for f in r.ignored if lfdirstate[f] == '?'] |
|
1276 | 1276 | return scmutil.status(r.modified, r.added, r.removed, r.deleted, |
|
1277 | 1277 | unknown, ignored, r.clean) |
|
1278 | 1278 | repo.status = overridestatus |
|
1279 | 1279 | orig(ui, repo, *dirs, **opts) |
|
1280 | 1280 | repo.status = oldstatus |
|
1281 | 1281 | |
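# The attribute-lookup pitfall described above, reduced to a sketch
# (Proxy and Inner are illustrative stand-ins, not Mercurial classes):
# __getattr__ only fires when normal lookup fails, so a method defined
# on the proxy class shadows a patched attribute that __setattr__
# forwarded to the inner object.
#
#   class Inner(object):
#       pass
#
#   class Proxy(object):
#       def __init__(self, inner):
#           object.__setattr__(self, '_inner', inner)
#       def __getattr__(self, name):
#           return getattr(self._inner, name)
#       def __setattr__(self, name, value):
#           setattr(self._inner, name, value)
#       def status(self):
#           return 'class method wins'
#
#   p = Proxy(Inner())
#   p.status = lambda: 'patched'  # stored on the inner object instead
#   p.status()                    # -> 'class method wins'; patch defeated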
|
1282 | 1282 | @eh.wrapcommand('rollback') |
|
1283 | 1283 | def overriderollback(orig, ui, repo, **opts): |
|
1284 | 1284 | with repo.wlock(): |
|
1285 | 1285 | before = repo.dirstate.parents() |
|
1286 | 1286 | orphans = set(f for f in repo.dirstate |
|
1287 | 1287 | if lfutil.isstandin(f) and repo.dirstate[f] != 'r') |
|
1288 | 1288 | result = orig(ui, repo, **opts) |
|
1289 | 1289 | after = repo.dirstate.parents() |
|
1290 | 1290 | if before == after: |
|
1291 | 1291 | return result # no need to restore standins |
|
1292 | 1292 | |
|
1293 | 1293 | pctx = repo['.'] |
|
1294 | 1294 | for f in repo.dirstate: |
|
1295 | 1295 | if lfutil.isstandin(f): |
|
1296 | 1296 | orphans.discard(f) |
|
1297 | 1297 | if repo.dirstate[f] == 'r': |
|
1298 | 1298 | repo.wvfs.unlinkpath(f, ignoremissing=True) |
|
1299 | 1299 | elif f in pctx: |
|
1300 | 1300 | fctx = pctx[f] |
|
1301 | 1301 | repo.wwrite(f, fctx.data(), fctx.flags()) |
|
1302 | 1302 | else: |
|
1303 | 1303 | # content of standin is not so important in 'a', |
|
1304 | 1304 | # 'm' or 'n' (coming from the 2nd parent) cases |
|
1305 | 1305 | lfutil.writestandin(repo, f, '', False) |
|
1306 | 1306 | for standin in orphans: |
|
1307 | 1307 | repo.wvfs.unlinkpath(standin, ignoremissing=True) |
|
1308 | 1308 | |
|
1309 | 1309 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
1310 | 1310 | orphans = set(lfdirstate) |
|
1311 | 1311 | lfiles = lfutil.listlfiles(repo) |
|
1312 | 1312 | for file in lfiles: |
|
1313 | 1313 | lfutil.synclfdirstate(repo, lfdirstate, file, True) |
|
1314 | 1314 | orphans.discard(file) |
|
1315 | 1315 | for lfile in orphans: |
|
1316 | 1316 | lfdirstate.drop(lfile) |
|
1317 | 1317 | lfdirstate.write() |
|
1318 | 1318 | return result |
|
1319 | 1319 | |
|
1320 | 1320 | @eh.wrapcommand('transplant', extension='transplant') |
|
1321 | 1321 | def overridetransplant(orig, ui, repo, *revs, **opts): |
|
1322 | 1322 | resuming = opts.get(r'continue') |
|
1323 | 1323 | repo._lfcommithooks.append(lfutil.automatedcommithook(resuming)) |
|
1324 | 1324 | repo._lfstatuswriters.append(lambda *msg, **opts: None) |
|
1325 | 1325 | try: |
|
1326 | 1326 | result = orig(ui, repo, *revs, **opts) |
|
1327 | 1327 | finally: |
|
1328 | 1328 | repo._lfstatuswriters.pop() |
|
1329 | 1329 | repo._lfcommithooks.pop() |
|
1330 | 1330 | return result |
|
1331 | 1331 | |
|
1332 | 1332 | @eh.wrapcommand('cat') |
|
1333 | 1333 | def overridecat(orig, ui, repo, file1, *pats, **opts): |
|
1334 | 1334 | opts = pycompat.byteskwargs(opts) |
|
1335 | 1335 | ctx = scmutil.revsingle(repo, opts.get('rev')) |
|
1336 | 1336 | err = 1 |
|
1337 | 1337 | notbad = set() |
|
1338 | 1338 | m = scmutil.match(ctx, (file1,) + pats, opts) |
|
1339 | 1339 | origmatchfn = m.matchfn |
|
1340 | 1340 | def lfmatchfn(f): |
|
1341 | 1341 | if origmatchfn(f): |
|
1342 | 1342 | return True |
|
1343 | 1343 | lf = lfutil.splitstandin(f) |
|
1344 | 1344 | if lf is None: |
|
1345 | 1345 | return False |
|
1346 | 1346 | notbad.add(lf) |
|
1347 | 1347 | return origmatchfn(lf) |
|
1348 | 1348 | m.matchfn = lfmatchfn |
|
1349 | 1349 | origbadfn = m.bad |
|
1350 | 1350 | def lfbadfn(f, msg): |
|
1351 | 1351 | if f not in notbad:
|
1352 | 1352 | origbadfn(f, msg) |
|
1353 | 1353 | m.bad = lfbadfn |
|
1354 | 1354 | |
|
1355 | 1355 | origvisitdirfn = m.visitdir |
|
1356 | 1356 | def lfvisitdirfn(dir): |
|
1357 | 1357 | if dir == lfutil.shortname: |
|
1358 | 1358 | return True |
|
1359 | 1359 | ret = origvisitdirfn(dir) |
|
1360 | 1360 | if ret: |
|
1361 | 1361 | return ret |
|
1362 | 1362 | lf = lfutil.splitstandin(dir) |
|
1363 | 1363 | if lf is None: |
|
1364 | 1364 | return False |
|
1365 | 1365 | return origvisitdirfn(lf) |
|
1366 | 1366 | m.visitdir = lfvisitdirfn |
|
1367 | 1367 | |
|
1368 | 1368 | for f in ctx.walk(m): |
|
1369 | 1369 | with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp: |
|
1370 | 1370 | lf = lfutil.splitstandin(f) |
|
1371 | 1371 | if lf is None or origmatchfn(f): |
|
1372 | 1372 | # duplicating unreachable code from commands.cat |
|
1373 | 1373 | data = ctx[f].data() |
|
1374 | 1374 | if opts.get('decode'): |
|
1375 | 1375 | data = repo.wwritedata(f, data) |
|
1376 | 1376 | fp.write(data) |
|
1377 | 1377 | else: |
|
1378 | 1378 | hash = lfutil.readasstandin(ctx[f]) |
|
1379 | 1379 | if not lfutil.inusercache(repo.ui, hash): |
|
1380 | 1380 | store = storefactory.openstore(repo) |
|
1381 | 1381 | success, missing = store.get([(lf, hash)]) |
|
1382 | 1382 | if len(success) != 1: |
|
1383 | 1383 | raise error.Abort( |
|
1384 | 1384 | _('largefile %s is not in cache and could not be ' |
|
1385 | 1385 | 'downloaded') % lf) |
|
1386 | 1386 | path = lfutil.usercachepath(repo.ui, hash) |
|
1387 | 1387 | with open(path, "rb") as fpin: |
|
1388 | 1388 | for chunk in util.filechunkiter(fpin): |
|
1389 | 1389 | fp.write(chunk) |
|
1390 | 1390 | err = 0 |
|
1391 | 1391 | return err |
|
1392 | 1392 | |
|
1393 | 1393 | @eh.wrapfunction(merge, 'update') |
|
1394 | 1394 | def mergeupdate(orig, repo, node, branchmerge, force, |
|
1395 | 1395 | *args, **kwargs): |
|
1396 | 1396 | matcher = kwargs.get(r'matcher', None) |
|
1397 | 1397 | # note if this is a partial update |
|
1398 | 1398 | partial = matcher and not matcher.always() |
|
1399 | 1399 | with repo.wlock(): |
|
1400 | 1400 | # branch |       |         |

1401 | 1401 | #  merge | force | partial | action

1402 | 1402 | # -------+-------+---------+--------------

1403 | 1403 | #    x   |   x   |    x    | linear-merge

1404 | 1404 | #    o   |   x   |    x    | branch-merge

1405 | 1405 | #    x   |   o   |    x    | overwrite (as clean update)

1406 | 1406 | #    o   |   o   |    x    | force-branch-merge (*1)

1407 | 1407 | #    x   |   x   |    o    |   (*)

1408 | 1408 | #    o   |   x   |    o    |   (*)

1409 | 1409 | #    x   |   o   |    o    | overwrite (as revert)

1410 | 1410 | #    o   |   o   |    o    |   (*)
|
1411 | 1411 | # |
|
1412 | 1412 | # (*) don't care |
|
1413 | 1413 | # (*1) deprecated, but used internally (e.g: "rebase --collapse") |
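# Illustrative callers for the first three rows (a sketch; the
# positional arguments mirror merge.update(repo, node, branchmerge,
# force) as wrapped above):
#
#   merge.update(repo, rev, False, False)  # hg update    -> linear-merge
#   merge.update(repo, rev, False, True)   # hg update -C -> overwrite
#   merge.update(repo, rev, True, False)   # hg merge     -> branch-merge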
|
1414 | 1414 | |
|
1415 | 1415 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
1416 | 1416 | unsure, s = lfdirstate.status(matchmod.always(), subrepos=[], |
|
1417 | 1417 | ignored=False, clean=True, unknown=False) |
|
1418 | 1418 | oldclean = set(s.clean) |
|
1419 | 1419 | pctx = repo['.'] |
|
1420 | 1420 | dctx = repo[node] |
|
1421 | 1421 | for lfile in unsure + s.modified: |
|
1422 | 1422 | lfileabs = repo.wvfs.join(lfile) |
|
1423 | 1423 | if not repo.wvfs.exists(lfileabs): |
|
1424 | 1424 | continue |
|
1425 | 1425 | lfhash = lfutil.hashfile(lfileabs) |
|
1426 | 1426 | standin = lfutil.standin(lfile) |
|
1427 | 1427 | lfutil.writestandin(repo, standin, lfhash, |
|
1428 | 1428 | lfutil.getexecutable(lfileabs)) |
|
1429 | 1429 | if (standin in pctx and |
|
1430 | 1430 | lfhash == lfutil.readasstandin(pctx[standin])): |
|
1431 | 1431 | oldclean.add(lfile) |
|
1432 | 1432 | for lfile in s.added: |
|
1433 | 1433 | fstandin = lfutil.standin(lfile) |
|
1434 | 1434 | if fstandin not in dctx: |
|
1435 | 1435 | # in this case, content of standin file is meaningless |
|
1436 | 1436 | # (in dctx, lfile is unknown, or normal file) |
|
1437 | 1437 | continue |
|
1438 | 1438 | lfutil.updatestandin(repo, lfile, fstandin) |
|
1439 | 1439 | # mark all clean largefiles as dirty, just in case the update gets |
|
1440 | 1440 | # interrupted before largefiles and lfdirstate are synchronized |
|
1441 | 1441 | for lfile in oldclean: |
|
1442 | 1442 | lfdirstate.normallookup(lfile) |
|
1443 | 1443 | lfdirstate.write() |
|
1444 | 1444 | |
|
1445 | 1445 | oldstandins = lfutil.getstandinsstate(repo) |
|
1446 | 1446 | # Make sure the merge runs on disk, not in-memory. largefiles is not a |
|
1447 | 1447 | # good candidate for in-memory merge (large files, custom dirstate, |
|
1448 | 1448 | # matcher usage). |
|
1449 | 1449 | kwargs[r'wc'] = repo[None] |
|
1450 | 1450 | result = orig(repo, node, branchmerge, force, *args, **kwargs) |
|
1451 | 1451 | |
|
1452 | 1452 | newstandins = lfutil.getstandinsstate(repo) |
|
1453 | 1453 | filelist = lfutil.getlfilestoupdate(oldstandins, newstandins) |
|
1454 | 1454 | |
|
1455 | 1455 | # to avoid leaving all largefiles as dirty and thus rehash them, mark |
|
1456 | 1456 | # all the ones that didn't change as clean |
|
1457 | 1457 | for lfile in oldclean.difference(filelist): |
|
1458 | 1458 | lfdirstate.normal(lfile) |
|
1459 | 1459 | lfdirstate.write() |
|
1460 | 1460 | |
|
1461 | 1461 | if branchmerge or force or partial: |
|
1462 | 1462 | filelist.extend(s.deleted + s.removed) |
|
1463 | 1463 | |
|
1464 | 1464 | lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, |
|
1465 | 1465 | normallookup=partial) |
|
1466 | 1466 | |
|
1467 | 1467 | return result |
|
1468 | 1468 | |
|
1469 | 1469 | @eh.wrapfunction(scmutil, 'marktouched') |
|
1470 | 1470 | def scmutilmarktouched(orig, repo, files, *args, **kwargs): |
|
1471 | 1471 | result = orig(repo, files, *args, **kwargs) |
|
1472 | 1472 | |
|
1473 | 1473 | filelist = [] |
|
1474 | 1474 | for f in files: |
|
1475 | 1475 | lf = lfutil.splitstandin(f) |
|
1476 | 1476 | if lf is not None: |
|
1477 | 1477 | filelist.append(lf) |
|
1478 | 1478 | if filelist: |
|
1479 | 1479 | lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, |
|
1480 | 1480 | printmessage=False, normallookup=True) |
|
1481 | 1481 | |
|
1482 | 1482 | return result |
|
1483 | 1483 | |
|
1484 | 1484 | @eh.wrapfunction(upgrade, 'preservedrequirements') |
|
1485 | 1485 | @eh.wrapfunction(upgrade, 'supporteddestrequirements') |
|
1486 | 1486 | def upgraderequirements(orig, repo): |
|
1487 | 1487 | reqs = orig(repo) |
|
1488 | 1488 | if 'largefiles' in repo.requirements: |
|
1489 | 1489 | reqs.add('largefiles') |
|
1490 | 1490 | return reqs |
|
1491 | 1491 | |
|
1492 | 1492 | _lfscheme = 'largefile://' |
|
1493 | 1493 | |
|
1494 | 1494 | @eh.wrapfunction(urlmod, 'open') |
|
1495 | 1495 | def openlargefile(orig, ui, url_, data=None): |
|
1496 | 1496 | if url_.startswith(_lfscheme): |
|
1497 | 1497 | if data: |
|
1498 | 1498 | msg = "cannot use data on a 'largefile://' url" |
|
1499 | 1499 | raise error.ProgrammingError(msg) |
|
1500 | 1500 | lfid = url_[len(_lfscheme):] |
|
1501 | 1501 | return storefactory.getlfile(ui, lfid) |
|
1502 | 1502 | else: |
|
1503 | 1503 | return orig(ui, url_, data=data) |
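# Illustrative use of the scheme handled above (lfhash is a made-up
# stand-in for a largefile's hash):
#
#   fp = urlmod.open(ui, 'largefile://' + lfhash)
#   data = fp.read()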
@@ -1,63 +1,63 | |||
|
1 | 1 | # narrowdirstate.py - extensions to mercurial dirstate to support narrow clones |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2017 Google, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | from mercurial.i18n import _ |
|
11 | 11 | from mercurial import ( |
|
12 | 12 | error, |
|
13 | 13 | ) |
|
14 | 14 | |
|
15 | 15 | def wrapdirstate(repo, dirstate): |
|
16 | 16 | """Add narrow spec dirstate ignore, block changes outside narrow spec.""" |
|
17 | 17 | |
|
18 | 18 | def _editfunc(fn): |
|
19 | def _wrapper(self, *args): | |
|
19 | def _wrapper(self, *args, **kwargs): | |
|
20 | 20 | narrowmatch = repo.narrowmatch() |
|
21 | 21 | for f in args: |
|
22 | 22 | if f is not None and not narrowmatch(f) and f not in self: |
|
23 | 23 | raise error.Abort(_("cannot track '%s' - it is outside " + |
|
24 | 24 | "the narrow clone") % f) |
|
25 | return fn(self, *args) | |
|
25 | return fn(self, *args, **kwargs) | |
|
26 | 26 | return _wrapper |
|
27 | 27 | |
|
28 | 28 | class narrowdirstate(dirstate.__class__): |
|
29 | 29 | # Prevent adding/editing/copying/deleting files that are outside the |
|
30 | 30 | # sparse checkout |
|
31 | 31 | @_editfunc |
|
32 | def normal(self, *args): | |
|
33 | return super(narrowdirstate, self).normal(*args) | |
|
32 | def normal(self, *args, **kwargs): | |
|
33 | return super(narrowdirstate, self).normal(*args, **kwargs) | |
|
34 | 34 | |
|
35 | 35 | @_editfunc |
|
36 | 36 | def add(self, *args): |
|
37 | 37 | return super(narrowdirstate, self).add(*args) |
|
38 | 38 | |
|
39 | 39 | @_editfunc |
|
40 | 40 | def normallookup(self, *args): |
|
41 | 41 | return super(narrowdirstate, self).normallookup(*args) |
|
42 | 42 | |
|
43 | 43 | @_editfunc |
|
44 | 44 | def copy(self, *args): |
|
45 | 45 | return super(narrowdirstate, self).copy(*args) |
|
46 | 46 | |
|
47 | 47 | @_editfunc |
|
48 | 48 | def remove(self, *args): |
|
49 | 49 | return super(narrowdirstate, self).remove(*args) |
|
50 | 50 | |
|
51 | 51 | @_editfunc |
|
52 | 52 | def merge(self, *args): |
|
53 | 53 | return super(narrowdirstate, self).merge(*args) |
|
54 | 54 | |
|
55 | 55 | def rebuild(self, parent, allfiles, changedfiles=None): |
|
56 | 56 | if changedfiles is None: |
|
57 | 57 | # Rebuilding entire dirstate, let's filter allfiles to match the |
|
58 | 58 | # narrowspec. |
|
59 | 59 | allfiles = [f for f in allfiles if repo.narrowmatch()(f)] |
|
60 | 60 | super(narrowdirstate, self).rebuild(parent, allfiles, changedfiles) |
|
61 | 61 | |
|
62 | 62 | dirstate.__class__ = narrowdirstate |
|
63 | 63 | return dirstate |
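# Illustrative wiring (a sketch, not this extension's actual setup
# code): apply the wrapper once per local repo so that later dirstate
# edits are validated against the narrowspec.
#
#   from hgext.narrow import narrowdirstate
#
#   def reposetup(ui, repo):
#       if repo.local():
#           narrowdirstate.wrapdirstate(repo, repo.dirstate)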
@@ -1,1111 +1,1113 | |||
|
1 | 1 | # __init__.py - remotefilelog extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL) |
|
8 | 8 | |
|
9 | 9 | This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY |
|
10 | 10 | GUARANTEES. This means that repositories created with this extension may |
|
11 | 11 | only be usable with the exact version of this extension/Mercurial that was |
|
12 | 12 | used. The extension attempts to enforce this in order to prevent repository |
|
13 | 13 | corruption. |
|
14 | 14 | |
|
15 | 15 | remotefilelog works by fetching file contents lazily and storing them |
|
16 | 16 | in a cache on the client rather than in revlogs. This allows enormous |
|
17 | 17 | histories to be transferred only partially, making them easier to |
|
18 | 18 | operate on. |
|
19 | 19 | |
|
20 | 20 | Configs: |
|
21 | 21 | |
|
22 | 22 | ``packs.maxchainlen`` specifies the maximum delta chain length in pack files |
|
23 | 23 | |
|
24 | 24 | ``packs.maxpacksize`` specifies the maximum pack file size |
|
25 | 25 | |
|
26 | 26 | ``packs.maxpackfilecount`` specifies the maximum number of packs in the |
|
27 | 27 | shared cache (trees only for now) |
|
28 | 28 | |
|
29 | 29 | ``remotefilelog.backgroundprefetch`` runs prefetch in background when True |
|
30 | 30 | |
|
31 | 31 | ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and |
|
32 | 32 | update, and on other commands that use them. Different from pullprefetch. |
|
33 | 33 | |
|
34 | 34 | ``remotefilelog.gcrepack`` does garbage collection during repack when True |
|
35 | 35 | |
|
36 | 36 | ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before |
|
37 | 37 | it is garbage collected |
|
38 | 38 | |
|
39 | 39 | ``remotefilelog.repackonhggc`` runs repack on hg gc when True |
|
40 | 40 | |
|
41 | 41 | ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in |
|
42 | 42 | days after which it is no longer prefetched. |
|
43 | 43 | |
|
44 | 44 | ``remotefilelog.prefetchdelay`` specifies delay between background |
|
45 | 45 | prefetches in seconds after operations that change the working copy parent |
|
46 | 46 | |
|
47 | 47 | ``remotefilelog.data.gencountlimit`` constrains the minimum number of data

48 | 48 | pack files required to be considered part of a generation. In particular,

49 | 49 | minimum number of pack files > gencountlimit.
|
50 | 50 | |
|
51 | 51 | ``remotefilelog.data.generations`` list for specifying the lower bound of |
|
52 | 52 | each generation of the data pack files. For example, list ['100MB','1MB']

53 | 53 | or ['1MB', '100MB'] will lead to three generations: [0, 1MB),

54 | 54 | [1MB, 100MB) and [100MB, infinity).
|
55 | 55 | |
|
56 | 56 | ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to |
|
57 | 57 | include in an incremental data repack. |
|
58 | 58 | |
|
59 | 59 | ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for |
|
60 | 60 | it to be considered for an incremental data repack. |
|
61 | 61 | |
|
62 | 62 | ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files |
|
63 | 63 | to include in an incremental data repack. |
|
64 | 64 | |
|
65 | 65 | ``remotefilelog.history.gencountlimit`` constrains the minimum number of

66 | 66 | history pack files required to be considered part of a generation. In

67 | 67 | particular, minimum number of pack files > gencountlimit.
|
68 | 68 | |
|
69 | 69 | ``remotefilelog.history.generations`` list for specifying the lower bound of |
|
70 | 70 | each generation of the history pack files. For example, list ['100MB',

71 | 71 | '1MB'] or ['1MB', '100MB'] will lead to three generations: [0, 1MB),

72 | 72 | [1MB, 100MB) and [100MB, infinity).
|
73 | 73 | |
|
74 | 74 | ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to |
|
75 | 75 | include in an incremental history repack. |
|
76 | 76 | |
|
77 | 77 | ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file |
|
78 | 78 | for it to be considered for an incremental history repack. |
|
79 | 79 | |
|
80 | 80 | ``remotefilelog.history.repacksizelimit`` the maximum total size of pack |
|
81 | 81 | files to include in an incremental history repack. |
|
82 | 82 | |
|
83 | 83 | ``remotefilelog.backgroundrepack`` automatically consolidate packs in the |
|
84 | 84 | background |
|
85 | 85 | |
|
86 | 86 | ``remotefilelog.cachepath`` path to cache |
|
87 | 87 | |
|
88 | 88 | ``remotefilelog.cachegroup`` if set, make cache directory sgid to this |
|
89 | 89 | group |
|
90 | 90 | |
|
91 | 91 | ``remotefilelog.cacheprocess`` binary to invoke for fetching file data |
|
92 | 92 | |
|
93 | 93 | ``remotefilelog.debug`` turn on remotefilelog-specific debug output |
|
94 | 94 | |
|
95 | 95 | ``remotefilelog.excludepattern`` pattern of files to exclude from pulls |
|
96 | 96 | |
|
97 | 97 | ``remotefilelog.includepattern`` pattern of files to include in pulls |
|
98 | 98 | |
|
99 | 99 | ``remotefilelog.fetchwarning`` message to print when too many
|
100 | 100 | single-file fetches occur |
|
101 | 101 | |
|
102 | 102 | ``remotefilelog.getfilesstep`` number of files to request in a single RPC |
|
103 | 103 | |
|
104 | 104 | ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch |
|
105 | 105 | files, otherwise use optimistic fetching |
|
106 | 106 | |
|
107 | 107 | ``remotefilelog.pullprefetch`` revset for selecting files that should be |
|
108 | 108 | eagerly downloaded rather than lazily |
|
109 | 109 | |
|
110 | 110 | ``remotefilelog.reponame`` name of the repo. If set, used to partition |
|
111 | 111 | data from other repos in a shared store. |
|
112 | 112 | |
|
113 | 113 | ``remotefilelog.server`` if true, enable server-side functionality |
|
114 | 114 | |
|
115 | 115 | ``remotefilelog.servercachepath`` path for caching blobs on the server |
|
116 | 116 | |
|
117 | 117 | ``remotefilelog.serverexpiration`` number of days to keep cached server |
|
118 | 118 | blobs |
|
119 | 119 | |
|
120 | 120 | ``remotefilelog.validatecache`` if set, check cache entries for corruption |
|
121 | 121 | before returning blobs |
|
122 | 122 | |
|
123 | 123 | ``remotefilelog.validatecachelog`` if set, check cache entries for |
|
124 | 124 | corruption before returning metadata |
|
125 | 125 | |
|
126 | 126 | """ |
|
127 | 127 | from __future__ import absolute_import |
|
128 | 128 | |
|
129 | 129 | import os |
|
130 | 130 | import time |
|
131 | 131 | import traceback |
|
132 | 132 | |
|
133 | 133 | from mercurial.node import hex |
|
134 | 134 | from mercurial.i18n import _ |
|
135 | 135 | from mercurial import ( |
|
136 | 136 | changegroup, |
|
137 | 137 | changelog, |
|
138 | 138 | cmdutil, |
|
139 | 139 | commands, |
|
140 | 140 | configitems, |
|
141 | 141 | context, |
|
142 | 142 | copies, |
|
143 | 143 | debugcommands as hgdebugcommands, |
|
144 | 144 | dispatch, |
|
145 | 145 | error, |
|
146 | 146 | exchange, |
|
147 | 147 | extensions, |
|
148 | 148 | hg, |
|
149 | 149 | localrepo, |
|
150 | 150 | match, |
|
151 | 151 | merge, |
|
152 | 152 | node as nodemod, |
|
153 | 153 | patch, |
|
154 | 154 | pycompat, |
|
155 | 155 | registrar, |
|
156 | 156 | repair, |
|
157 | 157 | repoview, |
|
158 | 158 | revset, |
|
159 | 159 | scmutil, |
|
160 | 160 | smartset, |
|
161 | 161 | streamclone, |
|
162 | 162 | util, |
|
163 | 163 | ) |
|
164 | 164 | from . import ( |
|
165 | 165 | constants, |
|
166 | 166 | debugcommands, |
|
167 | 167 | fileserverclient, |
|
168 | 168 | remotefilectx, |
|
169 | 169 | remotefilelog, |
|
170 | 170 | remotefilelogserver, |
|
171 | 171 | repack as repackmod, |
|
172 | 172 | shallowbundle, |
|
173 | 173 | shallowrepo, |
|
174 | 174 | shallowstore, |
|
175 | 175 | shallowutil, |
|
176 | 176 | shallowverifier, |
|
177 | 177 | ) |
|
178 | 178 | |
|
179 | 179 | # ensures debug commands are registered |
|
180 | 180 | hgdebugcommands.command |
|
181 | 181 | |
|
182 | 182 | cmdtable = {} |
|
183 | 183 | command = registrar.command(cmdtable) |
|
184 | 184 | |
|
185 | 185 | configtable = {} |
|
186 | 186 | configitem = registrar.configitem(configtable) |
|
187 | 187 | |
|
188 | 188 | configitem('remotefilelog', 'debug', default=False) |
|
189 | 189 | |
|
190 | 190 | configitem('remotefilelog', 'reponame', default='') |
|
191 | 191 | configitem('remotefilelog', 'cachepath', default=None) |
|
192 | 192 | configitem('remotefilelog', 'cachegroup', default=None) |
|
193 | 193 | configitem('remotefilelog', 'cacheprocess', default=None) |
|
194 | 194 | configitem('remotefilelog', 'cacheprocess.includepath', default=None) |
|
195 | 195 | configitem("remotefilelog", "cachelimit", default="1000 GB") |
|
196 | 196 | |
|
197 | 197 | configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault, |
|
198 | 198 | alias=[('remotefilelog', 'fallbackrepo')]) |
|
199 | 199 | |
|
200 | 200 | configitem('remotefilelog', 'validatecachelog', default=None) |
|
201 | 201 | configitem('remotefilelog', 'validatecache', default='on') |
|
202 | 202 | configitem('remotefilelog', 'server', default=None) |
|
203 | 203 | configitem('remotefilelog', 'servercachepath', default=None) |
|
204 | 204 | configitem("remotefilelog", "serverexpiration", default=30) |
|
205 | 205 | configitem('remotefilelog', 'backgroundrepack', default=False) |
|
206 | 206 | configitem('remotefilelog', 'bgprefetchrevs', default=None) |
|
207 | 207 | configitem('remotefilelog', 'pullprefetch', default=None) |
|
208 | 208 | configitem('remotefilelog', 'backgroundprefetch', default=False) |
|
209 | 209 | configitem('remotefilelog', 'prefetchdelay', default=120) |
|
210 | 210 | configitem('remotefilelog', 'prefetchdays', default=14) |
|
211 | 211 | |
|
212 | 212 | configitem('remotefilelog', 'getfilesstep', default=10000) |
|
213 | 213 | configitem('remotefilelog', 'getfilestype', default='optimistic') |
|
214 | 214 | configitem('remotefilelog', 'batchsize', configitems.dynamicdefault) |
|
215 | 215 | configitem('remotefilelog', 'fetchwarning', default='') |
|
216 | 216 | |
|
217 | 217 | configitem('remotefilelog', 'includepattern', default=None) |
|
218 | 218 | configitem('remotefilelog', 'excludepattern', default=None) |
|
219 | 219 | |
|
220 | 220 | configitem('remotefilelog', 'gcrepack', default=False) |
|
221 | 221 | configitem('remotefilelog', 'repackonhggc', default=False) |
|
222 | 222 | configitem('repack', 'chainorphansbysize', default=True) |
|
223 | 223 | |
|
224 | 224 | configitem('packs', 'maxpacksize', default=0) |
|
225 | 225 | configitem('packs', 'maxchainlen', default=1000) |
|
226 | 226 | |
|
227 | 227 | # default TTL limit is 30 days |
|
228 | 228 | _defaultlimit = 60 * 60 * 24 * 30 |
|
229 | 229 | configitem('remotefilelog', 'nodettl', default=_defaultlimit) |
|
230 | 230 | |
|
231 | 231 | configitem('remotefilelog', 'data.gencountlimit', default=2)
|
232 | 232 | configitem('remotefilelog', 'data.generations', |
|
233 | 233 | default=['1GB', '100MB', '1MB']) |
|
234 | 234 | configitem('remotefilelog', 'data.maxrepackpacks', default=50) |
|
235 | 235 | configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB') |
|
236 | 236 | configitem('remotefilelog', 'data.repacksizelimit', default='100MB') |
|
237 | 237 | |
|
238 | 238 | configitem('remotefilelog', 'history.gencountlimit', default=2)
|
239 | 239 | configitem('remotefilelog', 'history.generations', default=['100MB']) |
|
240 | 240 | configitem('remotefilelog', 'history.maxrepackpacks', default=50) |
|
241 | 241 | configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB') |
|
242 | 242 | configitem('remotefilelog', 'history.repacksizelimit', default='100MB') |
|
243 | 243 | |
|
244 | 244 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
245 | 245 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
246 | 246 | # be specifying the version(s) of Mercurial they are tested with, or |
|
247 | 247 | # leave the attribute unspecified. |
|
248 | 248 | testedwith = 'ships-with-hg-core' |
|
249 | 249 | |
|
250 | 250 | repoclass = localrepo.localrepository |
|
251 | 251 | repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT) |
|
252 | 252 | |
|
253 | 253 | isenabled = shallowutil.isenabled |
|
254 | 254 | |
|
255 | 255 | def uisetup(ui): |
|
256 | 256 | """Wraps user facing Mercurial commands to swap them out with shallow |
|
257 | 257 | versions. |
|
258 | 258 | """ |
|
259 | 259 | hg.wirepeersetupfuncs.append(fileserverclient.peersetup) |
|
260 | 260 | |
|
261 | 261 | entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow) |
|
262 | 262 | entry[1].append(('', 'shallow', None, |
|
263 | 263 | _("create a shallow clone which uses remote file " |
|
264 | 264 | "history"))) |
|
265 | 265 | |
|
266 | 266 | extensions.wrapcommand(commands.table, 'debugindex', |
|
267 | 267 | debugcommands.debugindex) |
|
268 | 268 | extensions.wrapcommand(commands.table, 'debugindexdot', |
|
269 | 269 | debugcommands.debugindexdot) |
|
270 | 270 | extensions.wrapcommand(commands.table, 'log', log) |
|
271 | 271 | extensions.wrapcommand(commands.table, 'pull', pull) |
|
272 | 272 | |
|
273 | 273 | # Prevent 'hg manifest --all' |
|
274 | 274 | def _manifest(orig, ui, repo, *args, **opts): |
|
275 | 275 | if (isenabled(repo) and opts.get(r'all')): |
|
276 | 276 | raise error.Abort(_("--all is not supported in a shallow repo")) |
|
277 | 277 | |
|
278 | 278 | return orig(ui, repo, *args, **opts) |
|
279 | 279 | extensions.wrapcommand(commands.table, "manifest", _manifest) |
|
280 | 280 | |
|
281 | 281 | # Wrap remotefilelog with lfs code |
|
282 | 282 | def _lfsloaded(loaded=False): |
|
283 | 283 | lfsmod = None |
|
284 | 284 | try: |
|
285 | 285 | lfsmod = extensions.find('lfs') |
|
286 | 286 | except KeyError: |
|
287 | 287 | pass |
|
288 | 288 | if lfsmod: |
|
289 | 289 | lfsmod.wrapfilelog(remotefilelog.remotefilelog) |
|
290 | 290 | fileserverclient._lfsmod = lfsmod |
|
291 | 291 | extensions.afterloaded('lfs', _lfsloaded) |
|
292 | 292 | |
|
293 | 293 | # debugdata needs remotefilelog.len to work |
|
294 | 294 | extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow) |
|
295 | 295 | |
|
296 | 296 | changegroup.cgpacker = shallowbundle.shallowcg1packer |
|
297 | 297 | |
|
298 | 298 | extensions.wrapfunction(changegroup, '_addchangegroupfiles', |
|
299 | 299 | shallowbundle.addchangegroupfiles) |
|
300 | 300 | extensions.wrapfunction( |
|
301 | 301 | changegroup, 'makechangegroup', shallowbundle.makechangegroup) |
|
302 | 302 | extensions.wrapfunction(localrepo, 'makestore', storewrapper) |
|
303 | 303 | extensions.wrapfunction(exchange, 'pull', exchangepull) |
|
304 | 304 | extensions.wrapfunction(merge, 'applyupdates', applyupdates) |
|
305 | 305 | extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles) |
|
306 | 306 | extensions.wrapfunction(context.workingctx, '_checklookup', checklookup) |
|
307 | 307 | extensions.wrapfunction(scmutil, '_findrenames', findrenames) |
|
308 | 308 | extensions.wrapfunction(copies, '_computeforwardmissing', |
|
309 | 309 | computeforwardmissing) |
|
310 | 310 | extensions.wrapfunction(dispatch, 'runcommand', runcommand) |
|
311 | 311 | extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets) |
|
312 | 312 | extensions.wrapfunction(context.changectx, 'filectx', filectx) |
|
313 | 313 | extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx) |
|
314 | 314 | extensions.wrapfunction(patch, 'trydiff', trydiff) |
|
315 | 315 | extensions.wrapfunction(hg, 'verify', _verify) |
|
316 | 316 | scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook) |
|
317 | 317 | |
|
318 | 318 | # disappointing hacks below |
|
319 | 319 | scmutil.getrenamedfn = getrenamedfn |
|
320 | 320 | extensions.wrapfunction(revset, 'filelog', filelogrevset) |
|
321 | 321 | revset.symbols['filelog'] = revset.filelog |
|
322 | 322 | extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs) |
|
323 | 323 | |
|
324 | 324 | |
|
325 | 325 | def cloneshallow(orig, ui, repo, *args, **opts): |
|
326 | 326 | if opts.get(r'shallow'): |
|
327 | 327 | repos = [] |
|
328 | 328 | def pull_shallow(orig, self, *args, **kwargs): |
|
329 | 329 | if not isenabled(self): |
|
330 | 330 | repos.append(self.unfiltered()) |
|
331 | 331 | # set up the client hooks so the post-clone update works |
|
332 | 332 | setupclient(self.ui, self.unfiltered()) |
|
333 | 333 | |
|
334 | 334 | # setupclient fixed the class on the repo itself |
|
335 | 335 | # but we also need to fix it on the repoview |
|
336 | 336 | if isinstance(self, repoview.repoview): |
|
337 | 337 | self.__class__.__bases__ = (self.__class__.__bases__[0], |
|
338 | 338 | self.unfiltered().__class__) |
|
339 | 339 | self.requirements.add(constants.SHALLOWREPO_REQUIREMENT) |
|
340 | 340 | self._writerequirements() |
|
341 | 341 | |
|
342 | 342 | # Since setupclient hadn't been called, exchange.pull was not |
|
343 | 343 | # wrapped. So we need to manually invoke our version of it. |
|
344 | 344 | return exchangepull(orig, self, *args, **kwargs) |
|
345 | 345 | else: |
|
346 | 346 | return orig(self, *args, **kwargs) |
|
347 | 347 | extensions.wrapfunction(exchange, 'pull', pull_shallow) |
|
348 | 348 | |
|
349 | 349 | # Wrap the stream logic to add requirements and to pass include/exclude |
|
350 | 350 | # patterns around. |
|
351 | 351 | def setup_streamout(repo, remote): |
|
352 | 352 | # Replace remote.stream_out with a version that sends file |
|
353 | 353 | # patterns. |
|
354 | 354 | def stream_out_shallow(orig): |
|
355 | 355 | caps = remote.capabilities() |
|
356 | 356 | if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps: |
|
357 | 357 | opts = {} |
|
358 | 358 | if repo.includepattern: |
|
359 | 359 | opts[r'includepattern'] = '\0'.join(repo.includepattern) |
|
360 | 360 | if repo.excludepattern: |
|
361 | 361 | opts[r'excludepattern'] = '\0'.join(repo.excludepattern) |
|
362 | 362 | return remote._callstream('stream_out_shallow', **opts) |
|
363 | 363 | else: |
|
364 | 364 | return orig() |
|
365 | 365 | extensions.wrapfunction(remote, 'stream_out', stream_out_shallow) |
|
366 | 366 | def stream_wrap(orig, op): |
|
367 | 367 | setup_streamout(op.repo, op.remote) |
|
368 | 368 | return orig(op) |
|
369 | 369 | extensions.wrapfunction( |
|
370 | 370 | streamclone, 'maybeperformlegacystreamclone', stream_wrap) |
|
371 | 371 | |
|
372 | 372 | def canperformstreamclone(orig, pullop, bundle2=False): |
|
373 | 373 | # remotefilelog is currently incompatible with the |
|
374 | 374 | # bundle2 flavor of streamclones, so force us to use |
|
375 | 375 | # v1 instead. |
|
376 | 376 | if 'v2' in pullop.remotebundle2caps.get('stream', []): |
|
377 | 377 | pullop.remotebundle2caps['stream'] = [ |
|
378 | 378 | c for c in pullop.remotebundle2caps['stream'] |
|
379 | 379 | if c != 'v2'] |
|
380 | 380 | if bundle2: |
|
381 | 381 | return False, None |
|
382 | 382 | supported, requirements = orig(pullop, bundle2=bundle2) |
|
383 | 383 | if requirements is not None: |
|
384 | 384 | requirements.add(constants.SHALLOWREPO_REQUIREMENT) |
|
385 | 385 | return supported, requirements |
|
386 | 386 | extensions.wrapfunction( |
|
387 | 387 | streamclone, 'canperformstreamclone', canperformstreamclone) |
|
388 | 388 | |
|
389 | 389 | try: |
|
390 | 390 | orig(ui, repo, *args, **opts) |
|
391 | 391 | finally: |
|
392 | 392 | if opts.get(r'shallow'): |
|
393 | 393 | for r in repos: |
|
394 | 394 | if util.safehasattr(r, 'fileservice'): |
|
395 | 395 | r.fileservice.close() |
|
396 | 396 | |
|
397 | 397 | def debugdatashallow(orig, *args, **kwds): |
|
398 | 398 | oldlen = remotefilelog.remotefilelog.__len__ |
|
399 | 399 | try: |
|
400 | 400 | remotefilelog.remotefilelog.__len__ = lambda x: 1 |
|
401 | 401 | return orig(*args, **kwds) |
|
402 | 402 | finally: |
|
403 | 403 | remotefilelog.remotefilelog.__len__ = oldlen |
|
404 | 404 | |
|
405 | 405 | def reposetup(ui, repo): |
|
406 | 406 | if not repo.local(): |
|
407 | 407 | return |
|
408 | 408 | |
|
409 | 409 | # put here intentionally because it doesn't work in uisetup
|
410 | 410 | ui.setconfig('hooks', 'update.prefetch', wcpprefetch) |
|
411 | 411 | ui.setconfig('hooks', 'commit.prefetch', wcpprefetch) |
|
412 | 412 | |
|
413 | 413 | isserverenabled = ui.configbool('remotefilelog', 'server') |
|
414 | 414 | isshallowclient = isenabled(repo) |
|
415 | 415 | |
|
416 | 416 | if isserverenabled and isshallowclient: |
|
417 | 417 | raise RuntimeError("Cannot be both a server and shallow client.") |
|
418 | 418 | |
|
419 | 419 | if isshallowclient: |
|
420 | 420 | setupclient(ui, repo) |
|
421 | 421 | |
|
422 | 422 | if isserverenabled: |
|
423 | 423 | remotefilelogserver.setupserver(ui, repo) |
|
424 | 424 | |
|
425 | 425 | def setupclient(ui, repo): |
|
426 | 426 | if not isinstance(repo, localrepo.localrepository): |
|
427 | 427 | return |
|
428 | 428 | |
|
429 | 429 | # Even clients get the server setup since they need to have the |
|
430 | 430 | # wireprotocol endpoints registered. |
|
431 | 431 | remotefilelogserver.onetimesetup(ui) |
|
432 | 432 | onetimeclientsetup(ui) |
|
433 | 433 | |
|
434 | 434 | shallowrepo.wraprepo(repo) |
|
435 | 435 | repo.store = shallowstore.wrapstore(repo.store) |
|
436 | 436 | |
|
437 | 437 | def storewrapper(orig, requirements, path, vfstype): |
|
438 | 438 | s = orig(requirements, path, vfstype) |
|
439 | 439 | if constants.SHALLOWREPO_REQUIREMENT in requirements: |
|
440 | 440 | s = shallowstore.wrapstore(s) |
|
441 | 441 | |
|
442 | 442 | return s |
|
443 | 443 | |
|
444 | 444 | # prefetch files before update |
|
445 | def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None): |

445 | def applyupdates(orig, repo, actions, wctx, mctx, overwrite, wantfiledata, |

446 | labels=None): |
|
446 | 447 | if isenabled(repo): |
|
447 | 448 | manifest = mctx.manifest() |
|
448 | 449 | files = [] |
|
449 | 450 | for f, args, msg in actions['g']: |
|
450 | 451 | files.append((f, hex(manifest[f]))) |
|
451 | 452 | # batch fetch the needed files from the server |
|
452 | 453 | repo.fileservice.prefetch(files) |
|
453 | return orig(repo, actions, wctx, mctx, overwrite, labels=labels) |

454 | return orig(repo, actions, wctx, mctx, overwrite, wantfiledata, |

455 | labels=labels) |
|
454 | 456 | |
|
455 | 457 | # Prefetch merge checkunknownfiles |
|
456 | 458 | def checkunknownfiles(orig, repo, wctx, mctx, force, actions, |
|
457 | 459 | *args, **kwargs): |
|
458 | 460 | if isenabled(repo): |
|
459 | 461 | files = [] |
|
460 | 462 | sparsematch = repo.maybesparsematch(mctx.rev()) |
|
461 | 463 | for f, (m, actionargs, msg) in actions.iteritems(): |
|
462 | 464 | if sparsematch and not sparsematch(f): |
|
463 | 465 | continue |
|
464 | 466 | if m in ('c', 'dc', 'cm'): |
|
465 | 467 | files.append((f, hex(mctx.filenode(f)))) |
|
466 | 468 | elif m == 'dg': |
|
467 | 469 | f2 = actionargs[0] |
|
468 | 470 | files.append((f2, hex(mctx.filenode(f2)))) |
|
469 | 471 | # batch fetch the needed files from the server |
|
470 | 472 | repo.fileservice.prefetch(files) |
|
471 | 473 | return orig(repo, wctx, mctx, force, actions, *args, **kwargs) |
|
472 | 474 | |
|
473 | 475 | # Prefetch files before status attempts to look at their size and contents |
|
474 | 476 | def checklookup(orig, self, files): |
|
475 | 477 | repo = self._repo |
|
476 | 478 | if isenabled(repo): |
|
477 | 479 | prefetchfiles = [] |
|
478 | 480 | for parent in self._parents: |
|
479 | 481 | for f in files: |
|
480 | 482 | if f in parent: |
|
481 | 483 | prefetchfiles.append((f, hex(parent.filenode(f)))) |
|
482 | 484 | # batch fetch the needed files from the server |
|
483 | 485 | repo.fileservice.prefetch(prefetchfiles) |
|
484 | 486 | return orig(self, files) |
|
485 | 487 | |
|
486 | 488 | # Prefetch the logic that compares added and removed files for renames |
|
487 | 489 | def findrenames(orig, repo, matcher, added, removed, *args, **kwargs): |
|
488 | 490 | if isenabled(repo): |
|
489 | 491 | files = [] |
|
490 | 492 | pmf = repo['.'].manifest() |
|
491 | 493 | for f in removed: |
|
492 | 494 | if f in pmf: |
|
493 | 495 | files.append((f, hex(pmf[f]))) |
|
494 | 496 | # batch fetch the needed files from the server |
|
495 | 497 | repo.fileservice.prefetch(files) |
|
496 | 498 | return orig(repo, matcher, added, removed, *args, **kwargs) |
|
497 | 499 | |
|
498 | 500 | # prefetch files before pathcopies check |
|
499 | 501 | def computeforwardmissing(orig, a, b, match=None): |
|
500 | 502 | missing = orig(a, b, match=match) |
|
501 | 503 | repo = a._repo |
|
502 | 504 | if isenabled(repo): |
|
503 | 505 | mb = b.manifest() |
|
504 | 506 | |
|
505 | 507 | files = [] |
|
506 | 508 | sparsematch = repo.maybesparsematch(b.rev()) |
|
507 | 509 | if sparsematch: |
|
508 | 510 | sparsemissing = set() |
|
509 | 511 | for f in missing: |
|
510 | 512 | if sparsematch(f): |
|
511 | 513 | files.append((f, hex(mb[f]))) |
|
512 | 514 | sparsemissing.add(f) |
|
513 | 515 | missing = sparsemissing |
|
514 | 516 | |
|
515 | 517 | # batch fetch the needed files from the server |
|
516 | 518 | repo.fileservice.prefetch(files) |
|
517 | 519 | return missing |
|
518 | 520 | |
|
519 | 521 | # close cache miss server connection after the command has finished |
|
520 | 522 | def runcommand(orig, lui, repo, *args, **kwargs): |
|
521 | 523 | fileservice = None |
|
522 | 524 | # repo can be None when running in chg: |
|
523 | 525 | # - at startup, reposetup was called because serve is not norepo |
|
524 | 526 | # - a norepo command like "help" is called |
|
525 | 527 | if repo and isenabled(repo): |
|
526 | 528 | fileservice = repo.fileservice |
|
527 | 529 | try: |
|
528 | 530 | return orig(lui, repo, *args, **kwargs) |
|
529 | 531 | finally: |
|
530 | 532 | if fileservice: |
|
531 | 533 | fileservice.close() |
|
532 | 534 | |
|
533 | 535 | # prevent strip from stripping remotefilelogs |
|
534 | 536 | def _collectbrokencsets(orig, repo, files, striprev): |
|
535 | 537 | if isenabled(repo): |
|
536 | 538 | files = [f for f in files if not repo.shallowmatch(f)]
|
537 | 539 | return orig(repo, files, striprev) |
|
538 | 540 | |
|
539 | 541 | # changectx wrappers |
|
540 | 542 | def filectx(orig, self, path, fileid=None, filelog=None): |
|
541 | 543 | if fileid is None: |
|
542 | 544 | fileid = self.filenode(path) |
|
543 | 545 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): |
|
544 | 546 | return remotefilectx.remotefilectx(self._repo, path, fileid=fileid, |
|
545 | 547 | changectx=self, filelog=filelog) |
|
546 | 548 | return orig(self, path, fileid=fileid, filelog=filelog) |
|
547 | 549 | |
|
548 | 550 | def workingfilectx(orig, self, path, filelog=None): |
|
549 | 551 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): |
|
550 | 552 | return remotefilectx.remoteworkingfilectx(self._repo, path, |
|
551 | 553 | workingctx=self, |
|
552 | 554 | filelog=filelog) |
|
553 | 555 | return orig(self, path, filelog=filelog) |
|
554 | 556 | |
|
555 | 557 | # prefetch required revisions before a diff |
|
556 | 558 | def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed, |
|
557 | 559 | copy, getfilectx, *args, **kwargs): |
|
558 | 560 | if isenabled(repo): |
|
559 | 561 | prefetch = [] |
|
560 | 562 | mf1 = ctx1.manifest() |
|
561 | 563 | for fname in modified + added + removed: |
|
562 | 564 | if fname in mf1: |
|
563 | 565 | fnode = getfilectx(fname, ctx1).filenode() |
|
564 | 566 | # fnode can be None if it's an edited working ctx file
|
565 | 567 | if fnode: |
|
566 | 568 | prefetch.append((fname, hex(fnode))) |
|
567 | 569 | if fname not in removed: |
|
568 | 570 | fnode = getfilectx(fname, ctx2).filenode() |
|
569 | 571 | if fnode: |
|
570 | 572 | prefetch.append((fname, hex(fnode))) |
|
571 | 573 | |
|
572 | 574 | repo.fileservice.prefetch(prefetch) |
|
573 | 575 | |
|
574 | 576 | return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy, |
|
575 | 577 | getfilectx, *args, **kwargs) |
|
576 | 578 | |
|
577 | 579 | # Prevent verify from processing files |
|
578 | 580 | # a stub for mercurial.hg.verify() |
|
579 | 581 | def _verify(orig, repo, level=None): |
|
580 | 582 | lock = repo.lock() |
|
581 | 583 | try: |
|
582 | 584 | return shallowverifier.shallowverifier(repo).verify() |
|
583 | 585 | finally: |
|
584 | 586 | lock.release() |
|
585 | 587 | |
|
586 | 588 | |
|
587 | 589 | clientonetime = False |
|
588 | 590 | def onetimeclientsetup(ui): |
|
589 | 591 | global clientonetime |
|
590 | 592 | if clientonetime: |
|
591 | 593 | return |
|
592 | 594 | clientonetime = True |
|
593 | 595 | |
|
594 | 596 | # Don't commit filelogs until we know the commit hash, since the hash |
|
595 | 597 | # is present in the filelog blob. |
|
596 | 598 | # This violates Mercurial's filelog->manifest->changelog write order, |
|
597 | 599 | # but is generally fine for client repos. |
|
598 | 600 | pendingfilecommits = [] |
|
599 | 601 | def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node, |
|
600 | 602 | flags, cachedelta=None, _metatuple=None): |
|
601 | 603 | if isinstance(link, int): |
|
602 | 604 | pendingfilecommits.append( |
|
603 | 605 | (self, rawtext, transaction, link, p1, p2, node, flags, |
|
604 | 606 | cachedelta, _metatuple)) |
|
605 | 607 | return node |
|
606 | 608 | else: |
|
607 | 609 | return orig(self, rawtext, transaction, link, p1, p2, node, flags, |
|
608 | 610 | cachedelta, _metatuple=_metatuple) |
|
609 | 611 | extensions.wrapfunction( |
|
610 | 612 | remotefilelog.remotefilelog, 'addrawrevision', addrawrevision) |
|
611 | 613 | |
|
612 | 614 | def changelogadd(orig, self, *args): |
|
613 | 615 | oldlen = len(self) |
|
614 | 616 | node = orig(self, *args) |
|
615 | 617 | newlen = len(self) |
|
616 | 618 | if oldlen != newlen: |
|
617 | 619 | for oldargs in pendingfilecommits: |
|
618 | 620 | log, rt, tr, link, p1, p2, n, fl, c, m = oldargs |
|
619 | 621 | linknode = self.node(link) |
|
620 | 622 | if linknode == node: |
|
621 | 623 | log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m) |
|
622 | 624 | else: |
|
623 | 625 | raise error.ProgrammingError( |
|
624 | 626 | 'pending multiple integer revisions are not supported') |
|
625 | 627 | else: |
|
626 | 628 | # "link" is actually wrong here (it is set to len(changelog)) |
|
627 | 629 | # if changelog remains unchanged, skip writing file revisions |
|
628 | 630 | # but still do a sanity check about pending multiple revisions |
|
629 | 631 | if len(set(x[3] for x in pendingfilecommits)) > 1: |
|
630 | 632 | raise error.ProgrammingError( |
|
631 | 633 | 'pending multiple integer revisions are not supported') |
|
632 | 634 | del pendingfilecommits[:] |
|
633 | 635 | return node |
|
634 | 636 | extensions.wrapfunction(changelog.changelog, 'add', changelogadd) |
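# The deferral above, reduced to a sketch (names are illustrative, and
# 'store' stands in for the real low-level write): revisions arriving
# with a provisional integer link revision are queued, then replayed
# once the changelog add reveals the real commit node.
#
#   pending = []
#
#   def addrevision(entry, link):
#       if isinstance(link, int):  # real commit node not known yet
#           pending.append(entry)
#       else:
#           store(entry, link)
#
#   def oncommit(node):
#       for entry in pending:
#           store(entry, node)     # backfill the real linknode
#       del pending[:]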
|
635 | 637 | |
|
636 | 638 | def getrenamedfn(repo, endrev=None): |
|
637 | 639 | rcache = {} |
|
638 | 640 | |
|
639 | 641 | def getrenamed(fn, rev): |
|
640 | 642 | '''looks up all renames for a file (up to endrev) the first |
|
641 | 643 | time the file is given. It indexes on the changerev and only |
|
642 | 644 | parses the manifest if linkrev != changerev. |
|
643 | 645 | Returns rename info for fn at changerev rev.''' |
|
644 | 646 | if rev in rcache.setdefault(fn, {}): |
|
645 | 647 | return rcache[fn][rev] |
|
646 | 648 | |
|
647 | 649 | try: |
|
648 | 650 | fctx = repo[rev].filectx(fn) |
|
649 | 651 | for ancestor in fctx.ancestors(): |
|
650 | 652 | if ancestor.path() == fn: |
|
651 | 653 | renamed = ancestor.renamed() |
|
652 | 654 | rcache[fn][ancestor.rev()] = renamed and renamed[0] |
|
653 | 655 | |
|
654 | 656 | renamed = fctx.renamed() |
|
655 | 657 | return renamed and renamed[0] |
|
656 | 658 | except error.LookupError: |
|
657 | 659 | return None |
|
658 | 660 | |
|
659 | 661 | return getrenamed |
|
660 | 662 | |
|
661 | 663 | def walkfilerevs(orig, repo, match, follow, revs, fncache): |
|
662 | 664 | if not isenabled(repo): |
|
663 | 665 | return orig(repo, match, follow, revs, fncache) |
|
664 | 666 | |
|
665 | 667 | # remotefilelogs can't be walked in rev order, so throw.
|
666 | 668 | # The caller will see the exception and walk the commit tree instead. |
|
667 | 669 | if not follow: |
|
668 | 670 | raise cmdutil.FileWalkError("Cannot walk via filelog") |
|
669 | 671 | |
|
670 | 672 | wanted = set() |
|
671 | 673 | minrev, maxrev = min(revs), max(revs) |
|
672 | 674 | |
|
673 | 675 | pctx = repo['.'] |
|
674 | 676 | for filename in match.files(): |
|
675 | 677 | if filename not in pctx: |
|
676 | 678 | raise error.Abort(_('cannot follow file not in parent ' |
|
677 | 679 | 'revision: "%s"') % filename) |
|
678 | 680 | fctx = pctx[filename] |
|
679 | 681 | |
|
680 | 682 | linkrev = fctx.linkrev() |
|
681 | 683 | if linkrev >= minrev and linkrev <= maxrev: |
|
682 | 684 | fncache.setdefault(linkrev, []).append(filename) |
|
683 | 685 | wanted.add(linkrev) |
|
684 | 686 | |
|
685 | 687 | for ancestor in fctx.ancestors(): |
|
686 | 688 | linkrev = ancestor.linkrev() |
|
687 | 689 | if linkrev >= minrev and linkrev <= maxrev: |
|
688 | 690 | fncache.setdefault(linkrev, []).append(ancestor.path()) |
|
689 | 691 | wanted.add(linkrev) |
|
690 | 692 | |
|
691 | 693 | return wanted |
|
692 | 694 | |
|
693 | 695 | def filelogrevset(orig, repo, subset, x): |
|
694 | 696 | """``filelog(pattern)`` |
|
695 | 697 | Changesets connected to the specified filelog. |
|
696 | 698 | |
|
697 | 699 | For performance reasons, ``filelog()`` does not show every changeset |
|
698 | 700 | that affects the requested file(s). See :hg:`help log` for details. For |
|
699 | 701 | a slower, more accurate result, use ``file()``. |
|
700 | 702 | """ |
|
701 | 703 | |
|
702 | 704 | if not isenabled(repo): |
|
703 | 705 | return orig(repo, subset, x) |
|
704 | 706 | |
|
705 | 707 | # i18n: "filelog" is a keyword |
|
706 | 708 | pat = revset.getstring(x, _("filelog requires a pattern")) |
|
707 | 709 | m = match.match(repo.root, repo.getcwd(), [pat], default='relpath', |
|
708 | 710 | ctx=repo[None]) |
|
709 | 711 | s = set() |
|
710 | 712 | |
|
711 | 713 | if not match.patkind(pat): |
|
712 | 714 | # slow |
|
713 | 715 | for r in subset: |
|
714 | 716 | ctx = repo[r] |
|
715 | 717 | cfiles = ctx.files() |
|
716 | 718 | for f in m.files(): |
|
717 | 719 | if f in cfiles: |
|
718 | 720 | s.add(ctx.rev()) |
|
719 | 721 | break |
|
720 | 722 | else: |
|
721 | 723 | # partial |
|
722 | 724 | files = (f for f in repo[None] if m(f)) |
|
723 | 725 | for f in files: |
|
724 | 726 | fctx = repo[None].filectx(f) |
|
725 | 727 | s.add(fctx.linkrev()) |
|
726 | 728 | for actx in fctx.ancestors(): |
|
727 | 729 | s.add(actx.linkrev()) |
|
728 | 730 | |
|
729 | 731 | return smartset.baseset([r for r in subset if r in s]) |
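# Example invocation of this revset (the path is a placeholder):
#
#   hg log -r "filelog('path:mercurial/commands.py')"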
|
730 | 732 | |
|
731 | 733 | @command('gc', [], _('hg gc [REPO...]'), norepo=True) |
|
732 | 734 | def gc(ui, *args, **opts): |
|
733 | 735 | '''garbage collect the client and server filelog caches |
|
734 | 736 | ''' |
|
735 | 737 | cachepaths = set() |
|
736 | 738 | |
|
737 | 739 | # get the system client cache |
|
738 | 740 | systemcache = shallowutil.getcachepath(ui, allowempty=True) |
|
739 | 741 | if systemcache: |
|
740 | 742 | cachepaths.add(systemcache) |
|
741 | 743 | |
|
742 | 744 | # get repo client and server cache |
|
743 | 745 | repopaths = [] |
|
744 | 746 | pwd = ui.environ.get('PWD') |
|
745 | 747 | if pwd: |
|
746 | 748 | repopaths.append(pwd) |
|
747 | 749 | |
|
748 | 750 | repopaths.extend(args) |
|
749 | 751 | repos = [] |
|
750 | 752 | for repopath in repopaths: |
|
751 | 753 | try: |
|
752 | 754 | repo = hg.peer(ui, {}, repopath) |
|
753 | 755 | repos.append(repo) |
|
754 | 756 | |
|
755 | 757 | repocache = shallowutil.getcachepath(repo.ui, allowempty=True) |
|
756 | 758 | if repocache: |
|
757 | 759 | cachepaths.add(repocache) |
|
758 | 760 | except error.RepoError: |
|
759 | 761 | pass |
|
760 | 762 | |
|
761 | 763 | # gc client cache |
|
762 | 764 | for cachepath in cachepaths: |
|
763 | 765 | gcclient(ui, cachepath) |
|
764 | 766 | |
|
765 | 767 | # gc server cache |
|
766 | 768 | for repo in repos: |
|
767 | 769 | remotefilelogserver.gcserver(ui, repo._repo) |
|
768 | 770 | |
|
769 | 771 | def gcclient(ui, cachepath): |
|
770 | 772 | # get list of repos that use this cache |
|
771 | 773 | repospath = os.path.join(cachepath, 'repos') |
|
772 | 774 | if not os.path.exists(repospath): |
|
773 | 775 | ui.warn(_("no known cache at %s\n") % cachepath) |
|
774 | 776 | return |
|
775 | 777 | |
|
776 | 778 | reposfile = open(repospath, 'rb') |
|
777 | 779 | repos = {r[:-1] for r in reposfile.readlines()} |
|
778 | 780 | reposfile.close() |
|
779 | 781 | |
|
780 | 782 | # build list of useful files |
|
781 | 783 | validrepos = [] |
|
782 | 784 | keepkeys = set() |
|
783 | 785 | |
|
784 | 786 | sharedcache = None |
|
785 | 787 | filesrepacked = False |
|
786 | 788 | |
|
787 | 789 | count = 0 |
|
788 | 790 | progress = ui.makeprogress(_("analyzing repositories"), unit="repos", |
|
789 | 791 | total=len(repos)) |
|
790 | 792 | for path in repos: |
|
791 | 793 | progress.update(count) |
|
792 | 794 | count += 1 |
|
793 | 795 | try: |
|
794 | 796 | path = ui.expandpath(os.path.normpath(path)) |
|
795 | 797 | except TypeError as e: |
|
796 | 798 | ui.warn(_("warning: malformed path: %r:%s\n") % (path, e)) |
|
797 | 799 | traceback.print_exc() |
|
798 | 800 | continue |
|
799 | 801 | try: |
|
800 | 802 | peer = hg.peer(ui, {}, path) |
|
801 | 803 | repo = peer._repo |
|
802 | 804 | except error.RepoError: |
|
803 | 805 | continue |
|
804 | 806 | |
|
805 | 807 | validrepos.append(path) |
|
806 | 808 | |
|
807 | 809 | # Protect against any repo or config changes that have happened since |
|
808 | 810 | # this repo was added to the repos file. We'd rather this loop succeed |
|
809 | 811 | # and too much be deleted, than the loop fail and nothing gets deleted. |
|
810 | 812 | if not isenabled(repo): |
|
811 | 813 | continue |
|
812 | 814 | |
|
813 | 815 | if not util.safehasattr(repo, 'name'): |
|
814 | 816 | ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path) |
|
815 | 817 | continue |
|
816 | 818 | |
|
817 | 819 | # If garbage collection on repack and repack on hg gc are enabled |
|
818 | 820 | # then loose files are repacked and garbage collected. |
|
819 | 821 | # Otherwise regular garbage collection is performed. |
|
820 | 822 | repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc') |
|
821 | 823 | gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack') |
|
822 | 824 | if repackonhggc and gcrepack: |
|
823 | 825 | try: |
|
824 | 826 | repackmod.incrementalrepack(repo) |
|
825 | 827 | filesrepacked = True |
|
826 | 828 | continue |
|
827 | 829 | except (IOError, repackmod.RepackAlreadyRunning): |
|
828 | 830 | # If repack cannot be performed due to insufficient disk space,

829 | 831 | # continue doing garbage collection of loose files w/o repack
|
830 | 832 | pass |
|
831 | 833 | |
|
832 | 834 | reponame = repo.name |
|
833 | 835 | if not sharedcache: |
|
834 | 836 | sharedcache = repo.sharedstore |
|
835 | 837 | |
|
836 | 838 | # Compute a keepset which is not garbage collected |
|
837 | 839 | def keyfn(fname, fnode): |
|
838 | 840 | return fileserverclient.getcachekey(reponame, fname, hex(fnode)) |
|
839 | 841 | keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys) |
|
840 | 842 | |
|
841 | 843 | progress.complete() |
|
842 | 844 | |
|
843 | 845 | # write list of valid repos back |
|
844 | 846 | oldumask = os.umask(0o002) |
|
845 | 847 | try: |
|
846 | 848 | reposfile = open(repospath, 'wb') |
|
847 | 849 | reposfile.writelines([("%s\n" % r) for r in validrepos]) |
|
848 | 850 | reposfile.close() |
|
849 | 851 | finally: |
|
850 | 852 | os.umask(oldumask) |
|
851 | 853 | |
|
852 | 854 | # prune cache |
|
853 | 855 | if sharedcache is not None: |
|
854 | 856 | sharedcache.gc(keepkeys) |
|
855 | 857 | elif not filesrepacked: |
|
856 | 858 | ui.warn(_("warning: no valid repos in repofile\n")) |
|
857 | 859 | |
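For reference, the `repos` file consumed above is just a newline-delimited list of repository paths, one path per line. A minimal standalone sketch of the same parse, assuming a hypothetical `cachepath` (not taken from this diff):

    import os

    def readtrackedrepos(cachepath):
        # The shared cache keeps a plain 'repos' file: one repository
        # path per line; strip the trailing newline from each entry,
        # exactly as gcclient does above.
        repospath = os.path.join(cachepath, 'repos')
        if not os.path.exists(repospath):
            return set()
        with open(repospath, 'rb') as f:
            return {line[:-1] for line in f.readlines()}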
|
858 | 860 | def log(orig, ui, repo, *pats, **opts): |
|
859 | 861 | if not isenabled(repo): |
|
860 | 862 | return orig(ui, repo, *pats, **opts) |
|
861 | 863 | |
|
862 | 864 | follow = opts.get(r'follow') |
|
863 | 865 | revs = opts.get(r'rev') |
|
864 | 866 | if pats: |
|
865 | 867 | # Force slowpath for non-follow patterns and follows that start from |
|
866 | 868 | # non-working-copy-parent revs. |
|
867 | 869 | if not follow or revs: |
|
868 | 870 | # This forces the slowpath |
|
869 | 871 | opts[r'removed'] = True |
|
870 | 872 | |
|
871 | 873 | # If this is a non-follow log without any revs specified, recommend that |
|
872 | 874 | # the user add -f to speed it up. |
|
873 | 875 | if not follow and not revs: |
|
874 | 876 | match = scmutil.match(repo['.'], pats, pycompat.byteskwargs(opts)) |
|
875 | 877 | isfile = not match.anypats() |
|
876 | 878 | if isfile: |
|
877 | 879 | for file in match.files(): |
|
878 | 880 | if not os.path.isfile(repo.wjoin(file)): |
|
879 | 881 | isfile = False |
|
880 | 882 | break |
|
881 | 883 | |
|
882 | 884 | if isfile: |
|
883 | 885 | ui.warn(_("warning: file log can be slow on large repos - " + |
|
884 | 886 | "use -f to speed it up\n")) |
|
885 | 887 | |
|
886 | 888 | return orig(ui, repo, *pats, **opts) |
|
887 | 889 | |
|
888 | 890 | def revdatelimit(ui, revset): |
|
889 | 891 | """Update revset so that only changesets no older than 'prefetchdays' days |
|
890 | 892 | are included. The default value is set to 14 days. If 'prefetchdays' is set

891 | 893 | to zero or a negative value, the date restriction is not applied.
|
892 | 894 | """ |
|
893 | 895 | days = ui.configint('remotefilelog', 'prefetchdays') |
|
894 | 896 | if days > 0: |
|
895 | 897 | revset = '(%s) & date(-%s)' % (revset, days) |
|
896 | 898 | return revset |
|
897 | 899 | |
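As a quick illustration of the composition (values are hypothetical): with prefetchdays set to 14 and a revset of 'draft()', the result is the original set intersected with a two-week date window.

    # minimal sketch of the string composition performed above
    days = 14
    revset = 'draft()'
    if days > 0:
        revset = '(%s) & date(-%s)' % (revset, days)
    assert revset == '(draft()) & date(-14)'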
|
898 | 900 | def readytofetch(repo): |
|
899 | 901 | """Check that enough time has passed since the last background prefetch. |
|
900 | 902 | This only relates to prefetches after operations that change the working |
|
901 | 903 | copy parent. Default delay between background prefetches is 2 minutes. |
|
902 | 904 | """ |
|
903 | 905 | timeout = repo.ui.configint('remotefilelog', 'prefetchdelay') |
|
904 | 906 | fname = repo.vfs.join('lastprefetch') |
|
905 | 907 | |
|
906 | 908 | ready = False |
|
907 | 909 | with open(fname, 'a'): |
|
908 | 910 | # the with construct above is used to avoid race conditions |
|
909 | 911 | modtime = os.path.getmtime(fname) |
|
910 | 912 | if (time.time() - modtime) > timeout: |
|
911 | 913 | os.utime(fname, None) |
|
912 | 914 | ready = True |
|
913 | 915 | |
|
914 | 916 | return ready |
|
915 | 917 | |
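The mtime-based throttle above is a reusable pattern; a minimal self-contained sketch, assuming a writable state-file path:

    import os
    import time

    def debounced(statefile, timeout):
        # Opening in append mode creates the file if needed without
        # truncating it, avoiding a race between concurrent callers.
        with open(statefile, 'a'):
            modtime = os.path.getmtime(statefile)
            if (time.time() - modtime) > timeout:
                os.utime(statefile, None)  # reset the timer
                return True
        return False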
|
916 | 918 | def wcpprefetch(ui, repo, **kwargs): |
|
917 | 919 | """Prefetches in background revisions specified by bgprefetchrevs revset. |
|
918 | 920 | Does background repack if backgroundrepack flag is set in config. |
|
919 | 921 | """ |
|
920 | 922 | shallow = isenabled(repo) |
|
921 | 923 | bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs') |
|
922 | 924 | isready = readytofetch(repo) |
|
923 | 925 | |
|
924 | 926 | if not (shallow and bgprefetchrevs and isready): |
|
925 | 927 | return |
|
926 | 928 | |
|
927 | 929 | bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack') |
|
928 | 930 | # update a revset with a date limit |
|
929 | 931 | bgprefetchrevs = revdatelimit(ui, bgprefetchrevs) |
|
930 | 932 | |
|
931 | 933 | def anon(): |
|
932 | 934 | if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch: |
|
933 | 935 | return |
|
934 | 936 | repo.ranprefetch = True |
|
935 | 937 | repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack) |
|
936 | 938 | |
|
937 | 939 | repo._afterlock(anon) |
|
938 | 940 | |
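The `ranprefetch` attribute acts as a run-once guard, so at most one background prefetch is scheduled per repo object even if the hook fires repeatedly. The pattern in isolation (hypothetical names):

    def runonce(obj, flagname, action):
        # Use a dynamically-set attribute as the guard, as wcpprefetch
        # does with repo.ranprefetch.
        if getattr(obj, flagname, False):
            return
        setattr(obj, flagname, True)
        action()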
|
939 | 941 | def pull(orig, ui, repo, *pats, **opts): |
|
940 | 942 | result = orig(ui, repo, *pats, **opts) |
|
941 | 943 | |
|
942 | 944 | if isenabled(repo): |
|
943 | 945 | # prefetch if it's configured |
|
944 | 946 | prefetchrevset = ui.config('remotefilelog', 'pullprefetch') |
|
945 | 947 | bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack') |
|
946 | 948 | bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch') |
|
947 | 949 | |
|
948 | 950 | if prefetchrevset: |
|
949 | 951 | ui.status(_("prefetching file contents\n")) |
|
950 | 952 | revs = scmutil.revrange(repo, [prefetchrevset]) |
|
951 | 953 | base = repo['.'].rev() |
|
952 | 954 | if bgprefetch: |
|
953 | 955 | repo.backgroundprefetch(prefetchrevset, repack=bgrepack) |
|
954 | 956 | else: |
|
955 | 957 | repo.prefetch(revs, base=base) |
|
956 | 958 | if bgrepack: |
|
957 | 959 | repackmod.backgroundrepack(repo, incremental=True) |
|
958 | 960 | elif bgrepack: |
|
959 | 961 | repackmod.backgroundrepack(repo, incremental=True) |
|
960 | 962 | |
|
961 | 963 | return result |
|
962 | 964 | |
|
963 | 965 | def exchangepull(orig, repo, remote, *args, **kwargs): |
|
964 | 966 | # Hook into the callstream/getbundle to insert bundle capabilities |
|
965 | 967 | # during a pull. |
|
966 | 968 | def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None, |
|
967 | 969 | **kwargs): |
|
968 | 970 | if not bundlecaps: |
|
969 | 971 | bundlecaps = set() |
|
970 | 972 | bundlecaps.add(constants.BUNDLE2_CAPABLITY) |
|
971 | 973 | return orig(source, heads=heads, common=common, bundlecaps=bundlecaps, |
|
972 | 974 | **kwargs) |
|
973 | 975 | |
|
974 | 976 | if util.safehasattr(remote, '_callstream'): |
|
975 | 977 | remote._localrepo = repo |
|
976 | 978 | elif util.safehasattr(remote, 'getbundle'): |
|
977 | 979 | extensions.wrapfunction(remote, 'getbundle', localgetbundle) |
|
978 | 980 | |
|
979 | 981 | return orig(repo, remote, *args, **kwargs) |
|
980 | 982 | |
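extensions.wrapfunction is Mercurial's standard monkey-patching helper: the wrapper receives the original callable as its first argument. A toy sketch of the same shape, without Mercurial (all names hypothetical):

    class Peer(object):
        def getbundle(self, source, bundlecaps=None):
            return (source, bundlecaps)

    def addcaps(orig, source, bundlecaps=None):
        # Inject an extra capability before delegating, mirroring
        # localgetbundle above ('example-cap' is made up).
        caps = set(bundlecaps or ())
        caps.add('example-cap')
        return orig(source, bundlecaps=caps)

    peer = Peer()
    origimpl = peer.getbundle
    # roughly what wrapfunction(peer, 'getbundle', addcaps) installs:
    peer.getbundle = lambda *a, **kw: addcaps(origimpl, *a, **kw)
    assert peer.getbundle('src')[1] == {'example-cap'}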
|
981 | 983 | def _fileprefetchhook(repo, revs, match): |
|
982 | 984 | if isenabled(repo): |
|
983 | 985 | allfiles = [] |
|
984 | 986 | for rev in revs: |
|
985 | 987 | if rev == nodemod.wdirrev or rev is None: |
|
986 | 988 | continue |
|
987 | 989 | ctx = repo[rev] |
|
988 | 990 | mf = ctx.manifest() |
|
989 | 991 | sparsematch = repo.maybesparsematch(ctx.rev()) |
|
990 | 992 | for path in ctx.walk(match): |
|
991 | 993 | if path.endswith('/'): |
|
992 | 994 | # Tree manifest entry that's being excluded as part of a narrow clone
|
993 | 995 | continue |
|
994 | 996 | if (not sparsematch or sparsematch(path)) and path in mf: |
|
995 | 997 | allfiles.append((path, hex(mf[path]))) |
|
996 | 998 | repo.fileservice.prefetch(allfiles) |
|
997 | 999 | |
|
998 | 1000 | @command('debugremotefilelog', [ |
|
999 | 1001 | ('d', 'decompress', None, _('decompress the filelog first')), |
|
1000 | 1002 | ], _('hg debugremotefilelog <path>'), norepo=True) |
|
1001 | 1003 | def debugremotefilelog(ui, path, **opts): |
|
1002 | 1004 | return debugcommands.debugremotefilelog(ui, path, **opts) |
|
1003 | 1005 | |
|
1004 | 1006 | @command('verifyremotefilelog', [ |
|
1005 | 1007 | ('d', 'decompress', None, _('decompress the filelogs first')), |
|
1006 | 1008 | ], _('hg verifyremotefilelogs <directory>'), norepo=True) |
|
1007 | 1009 | def verifyremotefilelog(ui, path, **opts): |
|
1008 | 1010 | return debugcommands.verifyremotefilelog(ui, path, **opts) |
|
1009 | 1011 | |
|
1010 | 1012 | @command('debugdatapack', [ |
|
1011 | 1013 | ('', 'long', None, _('print the long hashes')), |
|
1012 | 1014 | ('', 'node', '', _('dump the contents of node'), 'NODE'), |
|
1013 | 1015 | ], _('hg debugdatapack <paths>'), norepo=True) |
|
1014 | 1016 | def debugdatapack(ui, *paths, **opts): |
|
1015 | 1017 | return debugcommands.debugdatapack(ui, *paths, **opts) |
|
1016 | 1018 | |
|
1017 | 1019 | @command('debughistorypack', [ |
|
1018 | 1020 | ], _('hg debughistorypack <path>'), norepo=True) |
|
1019 | 1021 | def debughistorypack(ui, path, **opts): |
|
1020 | 1022 | return debugcommands.debughistorypack(ui, path) |
|
1021 | 1023 | |
|
1022 | 1024 | @command('debugkeepset', [ |
|
1023 | 1025 | ], _('hg debugkeepset')) |
|
1024 | 1026 | def debugkeepset(ui, repo, **opts): |
|
1025 | 1027 | # The command is used to measure keepset computation time |
|
1026 | 1028 | def keyfn(fname, fnode): |
|
1027 | 1029 | return fileserverclient.getcachekey(repo.name, fname, hex(fnode)) |
|
1028 | 1030 | repackmod.keepset(repo, keyfn) |
|
1029 | 1031 | return |
|
1030 | 1032 | |
|
1031 | 1033 | @command('debugwaitonrepack', [ |
|
1032 | 1034 | ], _('hg debugwaitonrepack')) |
|
1033 | 1035 | def debugwaitonrepack(ui, repo, **opts): |
|
1034 | 1036 | return debugcommands.debugwaitonrepack(repo) |
|
1035 | 1037 | |
|
1036 | 1038 | @command('debugwaitonprefetch', [ |
|
1037 | 1039 | ], _('hg debugwaitonprefetch')) |
|
1038 | 1040 | def debugwaitonprefetch(ui, repo, **opts): |
|
1039 | 1041 | return debugcommands.debugwaitonprefetch(repo) |
|
1040 | 1042 | |
|
1041 | 1043 | def resolveprefetchopts(ui, opts): |
|
1042 | 1044 | if not opts.get('rev'): |
|
1043 | 1045 | revset = ['.', 'draft()'] |
|
1044 | 1046 | |
|
1045 | 1047 | prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None) |
|
1046 | 1048 | if prefetchrevset: |
|
1047 | 1049 | revset.append('(%s)' % prefetchrevset) |
|
1048 | 1050 | bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None) |
|
1049 | 1051 | if bgprefetchrevs: |
|
1050 | 1052 | revset.append('(%s)' % bgprefetchrevs) |
|
1051 | 1053 | revset = '+'.join(revset) |
|
1052 | 1054 | |
|
1053 | 1055 | # update a revset with a date limit |
|
1054 | 1056 | revset = revdatelimit(ui, revset) |
|
1055 | 1057 | |
|
1056 | 1058 | opts['rev'] = [revset] |
|
1057 | 1059 | |
|
1058 | 1060 | if not opts.get('base'): |
|
1059 | 1061 | opts['base'] = None |
|
1060 | 1062 | |
|
1061 | 1063 | return opts |
|
1062 | 1064 | |
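With the defaults above, the final prefetch revset is the '+'-join of the pieces, wrapped by revdatelimit. A worked example with hypothetical config values (pullprefetch = 'master', prefetchdays = 14, bgprefetchrevs unset):

    revset = '+'.join(['.', 'draft()', '(master)'])
    revset = '(%s) & date(-%s)' % (revset, 14)
    assert revset == '(.+draft()+(master)) & date(-14)'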
|
1063 | 1065 | @command('prefetch', [ |
|
1064 | 1066 | ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')), |
|
1065 | 1067 | ('', 'repack', False, _('run repack after prefetch')), |
|
1066 | 1068 | ('b', 'base', '', _("rev that is assumed to already be local")), |
|
1067 | 1069 | ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]')) |
|
1068 | 1070 | def prefetch(ui, repo, *pats, **opts): |
|
1069 | 1071 | """prefetch file revisions from the server |
|
1070 | 1072 | |
|
1071 | 1073 | Prefetches file revisions for the specified revs and stores them in the

1072 | 1074 | local remotefilelog cache. If no rev is specified, the default rev is

1073 | 1075 | used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
|
1074 | 1076 | File names or patterns can be used to limit which files are downloaded. |
|
1075 | 1077 | |
|
1076 | 1078 | Return 0 on success. |
|
1077 | 1079 | """ |
|
1078 | 1080 | opts = pycompat.byteskwargs(opts) |
|
1079 | 1081 | if not isenabled(repo): |
|
1080 | 1082 | raise error.Abort(_("repo is not shallow")) |
|
1081 | 1083 | |
|
1082 | 1084 | opts = resolveprefetchopts(ui, opts) |
|
1083 | 1085 | revs = scmutil.revrange(repo, opts.get('rev')) |
|
1084 | 1086 | repo.prefetch(revs, opts.get('base'), pats, opts) |
|
1085 | 1087 | |
|
1086 | 1088 | # Run repack in background |
|
1087 | 1089 | if opts.get('repack'): |
|
1088 | 1090 | repackmod.backgroundrepack(repo, incremental=True) |
|
1089 | 1091 | |
|
1090 | 1092 | @command('repack', [ |
|
1091 | 1093 | ('', 'background', None, _('run in a background process'), None), |
|
1092 | 1094 | ('', 'incremental', None, _('do an incremental repack'), None), |
|
1093 | 1095 | ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None), |
|
1094 | 1096 | ], _('hg repack [OPTIONS]')) |
|
1095 | 1097 | def repack_(ui, repo, *pats, **opts): |
|
1096 | 1098 | if opts.get(r'background'): |
|
1097 | 1099 | repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'), |
|
1098 | 1100 | packsonly=opts.get(r'packsonly', False)) |
|
1099 | 1101 | return |
|
1100 | 1102 | |
|
1101 | 1103 | options = {'packsonly': opts.get(r'packsonly')} |
|
1102 | 1104 | |
|
1103 | 1105 | try: |
|
1104 | 1106 | if opts.get(r'incremental'): |
|
1105 | 1107 | repackmod.incrementalrepack(repo, options=options) |
|
1106 | 1108 | else: |
|
1107 | 1109 | repackmod.fullrepack(repo, options=options) |
|
1108 | 1110 | except repackmod.RepackAlreadyRunning as ex: |
|
1109 | 1111 | # Don't propagate the exception if the repack is already in
|
1110 | 1112 | # progress, since we want the command to exit 0. |
|
1111 | 1113 | repo.ui.warn('%s\n' % ex) |
@@ -1,348 +1,348 | |||
|
1 | 1 | # sparse.py - allow sparse checkouts of the working directory |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2014 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """allow sparse checkouts of the working directory (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | (This extension is not yet protected by backwards compatibility |
|
11 | 11 | guarantees. Any aspect may break in future releases until this |
|
12 | 12 | notice is removed.) |
|
13 | 13 | |
|
14 | 14 | This extension allows the working directory to consist of only a

15 | 15 | subset of the files in a revision, with specific files or

16 | 16 | directories explicitly included or excluded. Many repository

17 | 17 | operations have performance proportional to the number of files in

18 | 18 | the working directory, so realizing only a subset of files in the

19 | 19 | working directory can improve performance.
|
20 | 20 | |
|
21 | 21 | Sparse Config Files |
|
22 | 22 | ------------------- |
|
23 | 23 | |
|
24 | 24 | The set of files that are part of a sparse checkout are defined by |
|
25 | 25 | a sparse config file. The file defines 3 things: includes (files to |
|
26 | 26 | include in the sparse checkout), excludes (files to exclude from the |
|
27 | 27 | sparse checkout), and profiles (links to other config files). |
|
28 | 28 | |
|
29 | 29 | The file format is newline delimited. Empty lines and lines beginning |
|
30 | 30 | with ``#`` are ignored. |
|
31 | 31 | |
|
32 | 32 | Lines beginning with ``%include `` denote another sparse config file |
|
33 | 33 | to include, e.g. ``%include tests.sparse``. The filename is relative
|
34 | 34 | to the repository root. |
|
35 | 35 | |
|
36 | 36 | The special lines ``[include]`` and ``[exclude]`` denote the section |
|
37 | 37 | for includes and excludes that follow, respectively. It is illegal to |
|
38 | 38 | have ``[include]`` after ``[exclude]``. |
|
39 | 39 | |
|
40 | 40 | Non-special lines resemble file patterns to be added to either includes |
|
41 | 41 | or excludes. The syntax of these lines is documented by :hg:`help patterns`. |
|
42 | 42 | Patterns are interpreted as ``glob:`` by default and match against the |
|
43 | 43 | root of the repository. |
|
44 | 44 | |
|
45 | 45 | Exclusion patterns take precedence over inclusion patterns. So even |
|
46 | 46 | if a file is explicitly included, an ``[exclude]`` entry can remove it. |
|
47 | 47 | |
|
48 | 48 | For example, say you have a repository with 3 directories, ``frontend/``, |
|
49 | 49 | ``backend/``, and ``tools/``. ``frontend/`` and ``backend/`` correspond |
|
50 | 50 | to different projects and it is uncommon for someone working on one |
|
51 | 51 | to need the files for the other. But ``tools/`` contains files shared |
|
52 | 52 | between both projects. Your sparse config files may resemble:: |
|
53 | 53 | |
|
54 | 54 | # frontend.sparse |
|
55 | 55 | frontend/** |
|
56 | 56 | tools/** |
|
57 | 57 | |
|
58 | 58 | # backend.sparse |
|
59 | 59 | backend/** |
|
60 | 60 | tools/** |
|
61 | 61 | |
|
62 | 62 | Say the backend grows in size. Or there's a directory with thousands |
|
63 | 63 | of files you wish to exclude. You can modify the profile to exclude |
|
64 | 64 | certain files:: |
|
65 | 65 | |
|
66 | 66 | [include] |
|
67 | 67 | backend/** |
|
68 | 68 | tools/** |
|
69 | 69 | |
|
70 | 70 | [exclude] |
|
71 | 71 | tools/tests/** |
|
72 | 72 | """ |
|
73 | 73 | |
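A minimal sketch of a parser for the format described above, for illustration only (the real implementation lives in mercurial/sparse.py and additionally rejects ``[include]`` after ``[exclude]``):

    def parsesparseconfig(text):
        # Split a sparse config into includes, excludes and profiles.
        includes, excludes, profiles = set(), set(), set()
        current = includes            # bare patterns default to includes
        for line in text.splitlines():
            line = line.strip()
            if not line or line.startswith('#'):
                continue              # blank lines and comments are ignored
            if line.startswith('%include '):
                profiles.add(line[len('%include '):])
            elif line == '[include]':
                current = includes
            elif line == '[exclude]':
                current = excludes
            else:
                current.add(line)
        return includes, excludes, profiles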
|
74 | 74 | from __future__ import absolute_import |
|
75 | 75 | |
|
76 | 76 | from mercurial.i18n import _ |
|
77 | 77 | from mercurial import ( |
|
78 | 78 | commands, |
|
79 | 79 | dirstate, |
|
80 | 80 | error, |
|
81 | 81 | extensions, |
|
82 | 82 | hg, |
|
83 | 83 | logcmdutil, |
|
84 | 84 | match as matchmod, |
|
85 | 85 | pycompat, |
|
86 | 86 | registrar, |
|
87 | 87 | sparse, |
|
88 | 88 | util, |
|
89 | 89 | ) |
|
90 | 90 | |
|
91 | 91 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
92 | 92 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
93 | 93 | # be specifying the version(s) of Mercurial they are tested with, or |
|
94 | 94 | # leave the attribute unspecified. |
|
95 | 95 | testedwith = 'ships-with-hg-core' |
|
96 | 96 | |
|
97 | 97 | cmdtable = {} |
|
98 | 98 | command = registrar.command(cmdtable) |
|
99 | 99 | |
|
100 | 100 | def extsetup(ui): |
|
101 | 101 | sparse.enabled = True |
|
102 | 102 | |
|
103 | 103 | _setupclone(ui) |
|
104 | 104 | _setuplog(ui) |
|
105 | 105 | _setupadd(ui) |
|
106 | 106 | _setupdirstate(ui) |
|
107 | 107 | |
|
108 | 108 | def replacefilecache(cls, propname, replacement): |
|
109 | 109 | """Replace a filecache property with a new class. This allows changing the |
|
110 | 110 | cache invalidation condition.""" |
|
111 | 111 | origcls = cls |
|
112 | 112 | assert callable(replacement) |
|
113 | 113 | while cls is not object: |
|
114 | 114 | if propname in cls.__dict__: |
|
115 | 115 | orig = cls.__dict__[propname] |
|
116 | 116 | setattr(cls, propname, replacement(orig)) |
|
117 | 117 | break |
|
118 | 118 | cls = cls.__bases__[0] |
|
119 | 119 | |
|
120 | 120 | if cls is object: |
|
121 | 121 | raise AttributeError(_("type '%s' has no property '%s'") % (origcls, |
|
122 | 122 | propname)) |
|
123 | 123 | |
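replacefilecache walks the single-inheritance chain until it finds the class whose __dict__ actually defines the property. The lookup in isolation (toy classes, hypothetical names):

    def findowner(cls, propname):
        # Walk up via the first base class only, matching the helper above.
        while cls is not object:
            if propname in cls.__dict__:
                return cls
            cls = cls.__bases__[0]
        raise AttributeError(propname)

    class A(object):
        x = 1
    class B(A):
        pass

    assert findowner(B, 'x') is A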
|
124 | 124 | def _setuplog(ui): |
|
125 | 125 | entry = commands.table['log|history'] |
|
126 | 126 | entry[1].append(('', 'sparse', None, |
|
127 | 127 | "limit to changesets affecting the sparse checkout")) |
|
128 | 128 | |
|
129 | 129 | def _initialrevs(orig, repo, opts): |
|
130 | 130 | revs = orig(repo, opts) |
|
131 | 131 | if opts.get('sparse'): |
|
132 | 132 | sparsematch = sparse.matcher(repo) |
|
133 | 133 | def ctxmatch(rev): |
|
134 | 134 | ctx = repo[rev] |
|
135 | 135 | return any(f for f in ctx.files() if sparsematch(f)) |
|
136 | 136 | revs = revs.filter(ctxmatch) |
|
137 | 137 | return revs |
|
138 | 138 | extensions.wrapfunction(logcmdutil, '_initialrevs', _initialrevs) |
|
139 | 139 | |
|
140 | 140 | def _clonesparsecmd(orig, ui, repo, *args, **opts): |
|
141 | 141 | include_pat = opts.get(r'include') |
|
142 | 142 | exclude_pat = opts.get(r'exclude') |
|
143 | 143 | enableprofile_pat = opts.get(r'enable_profile') |
|
144 | 144 | narrow_pat = opts.get(r'narrow') |
|
145 | 145 | include = exclude = enableprofile = False |
|
146 | 146 | if include_pat: |
|
147 | 147 | pat = include_pat |
|
148 | 148 | include = True |
|
149 | 149 | if exclude_pat: |
|
150 | 150 | pat = exclude_pat |
|
151 | 151 | exclude = True |
|
152 | 152 | if enableprofile_pat: |
|
153 | 153 | pat = enableprofile_pat |
|
154 | 154 | enableprofile = True |
|
155 | 155 | if sum([include, exclude, enableprofile]) > 1: |
|
156 | 156 | raise error.Abort(_("too many flags specified"))
|
157 | 157 | # if --narrow is passed, it means they are includes and excludes for narrow |
|
158 | 158 | # clone |
|
159 | 159 | if not narrow_pat and (include or exclude or enableprofile): |
|
160 | 160 | def clonesparse(orig, self, node, overwrite, *args, **kwargs): |
|
161 | 161 | sparse.updateconfig(self.unfiltered(), pat, {}, include=include, |
|
162 | 162 | exclude=exclude, enableprofile=enableprofile, |
|
163 | 163 | usereporootpaths=True) |
|
164 | 164 | return orig(self, node, overwrite, *args, **kwargs) |
|
165 | 165 | extensions.wrapfunction(hg, 'updaterepo', clonesparse) |
|
166 | 166 | return orig(ui, repo, *args, **opts) |
|
167 | 167 | |
|
168 | 168 | def _setupclone(ui): |
|
169 | 169 | entry = commands.table['clone'] |
|
170 | 170 | entry[1].append(('', 'enable-profile', [], |
|
171 | 171 | 'enable a sparse profile')) |
|
172 | 172 | entry[1].append(('', 'include', [], |
|
173 | 173 | 'include sparse pattern')) |
|
174 | 174 | entry[1].append(('', 'exclude', [], |
|
175 | 175 | 'exclude sparse pattern')) |
|
176 | 176 | extensions.wrapcommand(commands.table, 'clone', _clonesparsecmd) |
|
177 | 177 | |
|
178 | 178 | def _setupadd(ui): |
|
179 | 179 | entry = commands.table['add'] |
|
180 | 180 | entry[1].append(('s', 'sparse', None, |
|
181 | 181 | 'also include directories of added files in sparse config')) |
|
182 | 182 | |
|
183 | 183 | def _add(orig, ui, repo, *pats, **opts): |
|
184 | 184 | if opts.get(r'sparse'): |
|
185 | 185 | dirs = set() |
|
186 | 186 | for pat in pats: |
|
187 | 187 | dirname, basename = util.split(pat) |
|
188 | 188 | dirs.add(dirname) |
|
189 | 189 | sparse.updateconfig(repo, list(dirs), opts, include=True) |
|
190 | 190 | return orig(ui, repo, *pats, **opts) |
|
191 | 191 | |
|
192 | 192 | extensions.wrapcommand(commands.table, 'add', _add) |
|
193 | 193 | |
|
194 | 194 | def _setupdirstate(ui): |
|
195 | 195 | """Modify the dirstate to prevent stat'ing excluded files, |
|
196 | 196 | and to prevent modifications to files outside the checkout. |
|
197 | 197 | """ |
|
198 | 198 | |
|
199 | 199 | def walk(orig, self, match, subrepos, unknown, ignored, full=True): |
|
200 | 200 | # hack to not exclude explicitly-specified paths so that they can |
|
201 | 201 | # be warned later on e.g. dirstate.add() |
|
202 | 202 | em = matchmod.exact(match.files()) |
|
203 | 203 | sm = matchmod.unionmatcher([self._sparsematcher, em]) |
|
204 | 204 | match = matchmod.intersectmatchers(match, sm) |
|
205 | 205 | return orig(self, match, subrepos, unknown, ignored, full) |
|
206 | 206 | |
|
207 | 207 | extensions.wrapfunction(dirstate.dirstate, 'walk', walk) |
|
208 | 208 | |
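The matcher algebra used above (exact paths unioned with the sparse matcher, then intersected with the incoming match) can be sketched with plain predicates; these are toy stand-ins for matchmod's exact/unionmatcher/intersectmatchers, not the real classes:

    def union(*matchers):
        return lambda f: any(m(f) for m in matchers)

    def intersect(*matchers):
        return lambda f: all(m(f) for m in matchers)

    sparsematch = lambda f: f.startswith('src/')
    exact = lambda f: f in {'docs/readme.txt'}     # explicitly named paths
    usermatch = lambda f: True                     # the incoming match

    effective = intersect(usermatch, union(sparsematch, exact))
    assert effective('src/a.py') and effective('docs/readme.txt')
    assert not effective('docs/other.txt')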
|
209 | 209 | # dirstate.rebuild should not add non-matching files |
|
210 | 210 | def _rebuild(orig, self, parent, allfiles, changedfiles=None): |
|
211 | 211 | matcher = self._sparsematcher |
|
212 | 212 | if not matcher.always(): |
|
213 | 213 | allfiles = [f for f in allfiles if matcher(f)] |
|
214 | 214 | if changedfiles: |
|
215 | 215 | changedfiles = [f for f in changedfiles if matcher(f)] |
|
216 | 216 | |
|
217 | 217 | if changedfiles is not None: |
|
218 | 218 | # In _rebuild, these files will be deleted from the dirstate |
|
219 | 219 | # when they are not found to be in allfiles |
|
220 | 220 | dirstatefilestoremove = set(f for f in self if not matcher(f)) |
|
221 | 221 | changedfiles = dirstatefilestoremove.union(changedfiles) |
|
222 | 222 | |
|
223 | 223 | return orig(self, parent, allfiles, changedfiles) |
|
224 | 224 | extensions.wrapfunction(dirstate.dirstate, 'rebuild', _rebuild) |
|
225 | 225 | |
|
226 | 226 | # Prevent adding files that are outside the sparse checkout |
|
227 | 227 | editfuncs = ['normal', 'add', 'normallookup', 'copy', 'remove', 'merge'] |
|
228 | 228 | hint = _('include file with `hg debugsparse --include <pattern>` or use ' + |
|
229 | 229 | '`hg add -s <file>` to include the directory of the file while adding')
|
230 | 230 | for func in editfuncs: |
|
231 | def _wrapper(orig, self, *args): | |
|
231 | def _wrapper(orig, self, *args, **kwargs): | |
|
232 | 232 | sparsematch = self._sparsematcher |
|
233 | 233 | if not sparsematch.always(): |
|
234 | 234 | for f in args: |
|
235 | 235 | if (f is not None and not sparsematch(f) and |
|
236 | 236 | f not in self): |
|
237 | 237 | raise error.Abort(_("cannot add '%s' - it is outside " |
|
238 | 238 | "the sparse checkout") % f, |
|
239 | 239 | hint=hint) |
|
240 | return orig(self, *args) | |
|
240 | return orig(self, *args, **kwargs) | |
|
241 | 241 | extensions.wrapfunction(dirstate.dirstate, func, _wrapper) |
|
242 | 242 | |
|
243 | 243 | @command('debugsparse', [ |
|
244 | 244 | ('I', 'include', False, _('include files in the sparse checkout')), |
|
245 | 245 | ('X', 'exclude', False, _('exclude files in the sparse checkout')), |
|
246 | 246 | ('d', 'delete', False, _('delete an include/exclude rule')), |
|
247 | 247 | ('f', 'force', False, _('allow changing rules even with pending changes')), |
|
248 | 248 | ('', 'enable-profile', False, _('enables the specified profile')), |
|
249 | 249 | ('', 'disable-profile', False, _('disables the specified profile')), |
|
250 | 250 | ('', 'import-rules', False, _('imports rules from a file')), |
|
251 | 251 | ('', 'clear-rules', False, _('clears local include/exclude rules')), |
|
252 | 252 | ('', 'refresh', False, _('updates the working directory after sparseness changes')),
|
253 | 253 | ('', 'reset', False, _('makes the repo full again')), |
|
254 | 254 | ] + commands.templateopts, |
|
255 | 255 | _('[--OPTION] PATTERN...'), |
|
256 | 256 | helpbasic=True) |
|
257 | 257 | def debugsparse(ui, repo, *pats, **opts): |
|
258 | 258 | """make the current checkout sparse, or edit the existing checkout |
|
259 | 259 | |
|
260 | 260 | The sparse command is used to make the current checkout sparse. |
|
261 | 261 | This means files that don't meet the sparse condition will not be |
|
262 | 262 | written to disk, or show up in any working copy operations. It does |
|
263 | 263 | not affect files in history in any way. |
|
264 | 264 | |
|
265 | 265 | Passing no arguments prints the currently applied sparse rules. |
|
266 | 266 | |
|
267 | 267 | --include and --exclude are used to add and remove files from the sparse |
|
268 | 268 | checkout. The effects of adding an include or exclude rule are applied |
|
269 | 269 | immediately. If applying the new rule would cause a file with pending |
|
270 | 270 | changes to be added or removed, the command will fail. Pass --force to |
|
271 | 271 | force a rule change even with pending changes (the changes on disk will |
|
272 | 272 | be preserved). |
|
273 | 273 | |
|
274 | 274 | --delete removes an existing include/exclude rule. The effects are |
|
275 | 275 | immediate. |
|
276 | 276 | |
|
277 | 277 | --refresh refreshes the files on disk based on the sparse rules. This is |
|
278 | 278 | only necessary if .hg/sparse was changed by hand. |
|
279 | 279 | |
|
280 | 280 | --enable-profile and --disable-profile accept a path to a .hgsparse file. |
|
281 | 281 | This allows defining sparse checkouts and tracking them inside the |
|
282 | 282 | repository. This is useful for defining commonly used sparse checkouts for |
|
283 | 283 | many people to use. As the profile definition changes over time, the sparse |
|
284 | 284 | checkout will automatically be updated appropriately, depending on which |
|
285 | 285 | changeset is checked out. Changes to .hgsparse are not applied until they |
|
286 | 286 | have been committed. |
|
287 | 287 | |
|
288 | 288 | --import-rules accepts a path to a file containing rules in the .hgsparse |
|
289 | 289 | format, allowing you to add --include, --exclude and --enable-profile rules |
|
290 | 290 | in bulk. Like the --include, --exclude and --enable-profile switches, the |
|
291 | 291 | changes are applied immediately. |
|
292 | 292 | |
|
293 | 293 | --clear-rules removes all local include and exclude rules, while leaving |
|
294 | 294 | any enabled profiles in place. |
|
295 | 295 | |
|
296 | 296 | Returns 0 if editing the sparse checkout succeeds. |
|
297 | 297 | """ |
|
298 | 298 | opts = pycompat.byteskwargs(opts) |
|
299 | 299 | include = opts.get('include') |
|
300 | 300 | exclude = opts.get('exclude') |
|
301 | 301 | force = opts.get('force') |
|
302 | 302 | enableprofile = opts.get('enable_profile') |
|
303 | 303 | disableprofile = opts.get('disable_profile') |
|
304 | 304 | importrules = opts.get('import_rules') |
|
305 | 305 | clearrules = opts.get('clear_rules') |
|
306 | 306 | delete = opts.get('delete') |
|
307 | 307 | refresh = opts.get('refresh') |
|
308 | 308 | reset = opts.get('reset') |
|
309 | 309 | count = sum([include, exclude, enableprofile, disableprofile, delete, |
|
310 | 310 | importrules, refresh, clearrules, reset]) |
|
311 | 311 | if count > 1: |
|
312 | 312 | raise error.Abort(_("too many flags specified")) |
|
313 | 313 | |
|
314 | 314 | if count == 0: |
|
315 | 315 | if repo.vfs.exists('sparse'): |
|
316 | 316 | ui.status(repo.vfs.read("sparse") + "\n") |
|
317 | 317 | temporaryincludes = sparse.readtemporaryincludes(repo) |
|
318 | 318 | if temporaryincludes: |
|
319 | 319 | ui.status(_("Temporarily Included Files (for merge/rebase):\n")) |
|
320 | 320 | ui.status(("\n".join(temporaryincludes) + "\n")) |
|
321 | 321 | return |
|
322 | 322 | else: |
|
323 | 323 | raise error.Abort(_('the debugsparse command is only supported on' |
|
324 | 324 | ' sparse repositories')) |
|
325 | 325 | |
|
326 | 326 | if include or exclude or delete or reset or enableprofile or disableprofile: |
|
327 | 327 | sparse.updateconfig(repo, pats, opts, include=include, exclude=exclude, |
|
328 | 328 | reset=reset, delete=delete, |
|
329 | 329 | enableprofile=enableprofile, |
|
330 | 330 | disableprofile=disableprofile, force=force) |
|
331 | 331 | |
|
332 | 332 | if importrules: |
|
333 | 333 | sparse.importfromfiles(repo, opts, pats, force=force) |
|
334 | 334 | |
|
335 | 335 | if clearrules: |
|
336 | 336 | sparse.clearrules(repo, force=force) |
|
337 | 337 | |
|
338 | 338 | if refresh: |
|
339 | 339 | try: |
|
340 | 340 | wlock = repo.wlock() |
|
341 | 341 | fcounts = map( |
|
342 | 342 | len, |
|
343 | 343 | sparse.refreshwdir(repo, repo.status(), sparse.matcher(repo), |
|
344 | 344 | force=force)) |
|
345 | 345 | sparse.printchanges(ui, opts, added=fcounts[0], dropped=fcounts[1], |
|
346 | 346 | conflicting=fcounts[2]) |
|
347 | 347 | finally: |
|
348 | 348 | wlock.release() |
@@ -1,2604 +1,2606 | |||
|
1 | 1 | # context.py - changeset and file context objects for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import filecmp |
|
12 | 12 | import os |
|
13 | 13 | import stat |
|
14 | 14 | |
|
15 | 15 | from .i18n import _ |
|
16 | 16 | from .node import ( |
|
17 | 17 | addednodeid, |
|
18 | 18 | hex, |
|
19 | 19 | modifiednodeid, |
|
20 | 20 | nullid, |
|
21 | 21 | nullrev, |
|
22 | 22 | short, |
|
23 | 23 | wdirfilenodeids, |
|
24 | 24 | wdirhex, |
|
25 | 25 | ) |
|
26 | 26 | from . import ( |
|
27 | 27 | dagop, |
|
28 | 28 | encoding, |
|
29 | 29 | error, |
|
30 | 30 | fileset, |
|
31 | 31 | match as matchmod, |
|
32 | 32 | obsolete as obsmod, |
|
33 | 33 | patch, |
|
34 | 34 | pathutil, |
|
35 | 35 | phases, |
|
36 | 36 | pycompat, |
|
37 | 37 | repoview, |
|
38 | 38 | scmutil, |
|
39 | 39 | sparse, |
|
40 | 40 | subrepo, |
|
41 | 41 | subrepoutil, |
|
42 | 42 | util, |
|
43 | 43 | ) |
|
44 | 44 | from .utils import ( |
|
45 | 45 | dateutil, |
|
46 | 46 | stringutil, |
|
47 | 47 | ) |
|
48 | 48 | |
|
49 | 49 | propertycache = util.propertycache |
|
50 | 50 | |
|
51 | 51 | class basectx(object): |
|
52 | 52 | """A basectx object represents the common logic for its children: |
|
53 | 53 | changectx: read-only context that is already present in the repo, |
|
54 | 54 | workingctx: a context that represents the working directory and can |
|
55 | 55 | be committed, |
|
56 | 56 | memctx: a context that represents changes in-memory and can also |
|
57 | 57 | be committed.""" |
|
58 | 58 | |
|
59 | 59 | def __init__(self, repo): |
|
60 | 60 | self._repo = repo |
|
61 | 61 | |
|
62 | 62 | def __bytes__(self): |
|
63 | 63 | return short(self.node()) |
|
64 | 64 | |
|
65 | 65 | __str__ = encoding.strmethod(__bytes__) |
|
66 | 66 | |
|
67 | 67 | def __repr__(self): |
|
68 | 68 | return r"<%s %s>" % (type(self).__name__, str(self)) |
|
69 | 69 | |
|
70 | 70 | def __eq__(self, other): |
|
71 | 71 | try: |
|
72 | 72 | return type(self) == type(other) and self._rev == other._rev |
|
73 | 73 | except AttributeError: |
|
74 | 74 | return False |
|
75 | 75 | |
|
76 | 76 | def __ne__(self, other): |
|
77 | 77 | return not (self == other) |
|
78 | 78 | |
|
79 | 79 | def __contains__(self, key): |
|
80 | 80 | return key in self._manifest |
|
81 | 81 | |
|
82 | 82 | def __getitem__(self, key): |
|
83 | 83 | return self.filectx(key) |
|
84 | 84 | |
|
85 | 85 | def __iter__(self): |
|
86 | 86 | return iter(self._manifest) |
|
87 | 87 | |
|
88 | 88 | def _buildstatusmanifest(self, status): |
|
89 | 89 | """Builds a manifest that includes the given status results, if this is |
|
90 | 90 | a working copy context. For non-working copy contexts, it just returns |
|
91 | 91 | the normal manifest.""" |
|
92 | 92 | return self.manifest() |
|
93 | 93 | |
|
94 | 94 | def _matchstatus(self, other, match): |
|
95 | 95 | """This internal method provides a way for child objects to override the |
|
96 | 96 | match operator. |
|
97 | 97 | """ |
|
98 | 98 | return match |
|
99 | 99 | |
|
100 | 100 | def _buildstatus(self, other, s, match, listignored, listclean, |
|
101 | 101 | listunknown): |
|
102 | 102 | """build a status with respect to another context""" |
|
103 | 103 | # Load earliest manifest first for caching reasons. More specifically, |
|
104 | 104 | # if you have revisions 1000 and 1001, 1001 is probably stored as a |
|
105 | 105 | # delta against 1000. Thus, if you read 1000 first, we'll reconstruct |
|
106 | 106 | # 1000 and cache it so that when you read 1001, we just need to apply a |
|
107 | 107 | # delta to what's in the cache. So that's one full reconstruction + one |
|
108 | 108 | # delta application. |
|
109 | 109 | mf2 = None |
|
110 | 110 | if self.rev() is not None and self.rev() < other.rev(): |
|
111 | 111 | mf2 = self._buildstatusmanifest(s) |
|
112 | 112 | mf1 = other._buildstatusmanifest(s) |
|
113 | 113 | if mf2 is None: |
|
114 | 114 | mf2 = self._buildstatusmanifest(s) |
|
115 | 115 | |
|
116 | 116 | modified, added = [], [] |
|
117 | 117 | removed = [] |
|
118 | 118 | clean = [] |
|
119 | 119 | deleted, unknown, ignored = s.deleted, s.unknown, s.ignored |
|
120 | 120 | deletedset = set(deleted) |
|
121 | 121 | d = mf1.diff(mf2, match=match, clean=listclean) |
|
122 | 122 | for fn, value in d.iteritems(): |
|
123 | 123 | if fn in deletedset: |
|
124 | 124 | continue |
|
125 | 125 | if value is None: |
|
126 | 126 | clean.append(fn) |
|
127 | 127 | continue |
|
128 | 128 | (node1, flag1), (node2, flag2) = value |
|
129 | 129 | if node1 is None: |
|
130 | 130 | added.append(fn) |
|
131 | 131 | elif node2 is None: |
|
132 | 132 | removed.append(fn) |
|
133 | 133 | elif flag1 != flag2: |
|
134 | 134 | modified.append(fn) |
|
135 | 135 | elif node2 not in wdirfilenodeids: |
|
136 | 136 | # When comparing files between two commits, we save time by |
|
137 | 137 | # not comparing the file contents when the nodeids differ. |
|
138 | 138 | # Note that this means we incorrectly report a reverted change |
|
139 | 139 | # to a file as a modification. |
|
140 | 140 | modified.append(fn) |
|
141 | 141 | elif self[fn].cmp(other[fn]): |
|
142 | 142 | modified.append(fn) |
|
143 | 143 | else: |
|
144 | 144 | clean.append(fn) |
|
145 | 145 | |
|
146 | 146 | if removed: |
|
147 | 147 | # need to filter files if they are already reported as removed |
|
148 | 148 | unknown = [fn for fn in unknown if fn not in mf1 and |
|
149 | 149 | (not match or match(fn))] |
|
150 | 150 | ignored = [fn for fn in ignored if fn not in mf1 and |
|
151 | 151 | (not match or match(fn))] |
|
152 | 152 | # if they're deleted, don't report them as removed |
|
153 | 153 | removed = [fn for fn in removed if fn not in deletedset] |
|
154 | 154 | |
|
155 | 155 | return scmutil.status(modified, added, removed, deleted, unknown, |
|
156 | 156 | ignored, clean) |
|
157 | 157 | |
|
158 | 158 | @propertycache |
|
159 | 159 | def substate(self): |
|
160 | 160 | return subrepoutil.state(self, self._repo.ui) |
|
161 | 161 | |
|
162 | 162 | def subrev(self, subpath): |
|
163 | 163 | return self.substate[subpath][1] |
|
164 | 164 | |
|
165 | 165 | def rev(self): |
|
166 | 166 | return self._rev |
|
167 | 167 | def node(self): |
|
168 | 168 | return self._node |
|
169 | 169 | def hex(self): |
|
170 | 170 | return hex(self.node()) |
|
171 | 171 | def manifest(self): |
|
172 | 172 | return self._manifest |
|
173 | 173 | def manifestctx(self): |
|
174 | 174 | return self._manifestctx |
|
175 | 175 | def repo(self): |
|
176 | 176 | return self._repo |
|
177 | 177 | def phasestr(self): |
|
178 | 178 | return phases.phasenames[self.phase()] |
|
179 | 179 | def mutable(self): |
|
180 | 180 | return self.phase() > phases.public |
|
181 | 181 | |
|
182 | 182 | def matchfileset(self, expr, badfn=None): |
|
183 | 183 | return fileset.match(self, expr, badfn=badfn) |
|
184 | 184 | |
|
185 | 185 | def obsolete(self): |
|
186 | 186 | """True if the changeset is obsolete""" |
|
187 | 187 | return self.rev() in obsmod.getrevs(self._repo, 'obsolete') |
|
188 | 188 | |
|
189 | 189 | def extinct(self): |
|
190 | 190 | """True if the changeset is extinct""" |
|
191 | 191 | return self.rev() in obsmod.getrevs(self._repo, 'extinct') |
|
192 | 192 | |
|
193 | 193 | def orphan(self): |
|
194 | 194 | """True if the changeset is not obsolete, but its ancestor is""" |
|
195 | 195 | return self.rev() in obsmod.getrevs(self._repo, 'orphan') |
|
196 | 196 | |
|
197 | 197 | def phasedivergent(self): |
|
198 | 198 | """True if the changeset tries to be a successor of a public changeset |
|
199 | 199 | |
|
200 | 200 | Only non-public and non-obsolete changesets may be phase-divergent. |
|
201 | 201 | """ |
|
202 | 202 | return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent') |
|
203 | 203 | |
|
204 | 204 | def contentdivergent(self): |
|
205 | 205 | """Is a successor of a changeset with multiple possible successor sets |
|
206 | 206 | |
|
207 | 207 | Only non-public and non-obsolete changesets may be content-divergent. |
|
208 | 208 | """ |
|
209 | 209 | return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent') |
|
210 | 210 | |
|
211 | 211 | def isunstable(self): |
|
212 | 212 | """True if the changeset is either orphan, phase-divergent or |
|
213 | 213 | content-divergent""" |
|
214 | 214 | return self.orphan() or self.phasedivergent() or self.contentdivergent() |
|
215 | 215 | |
|
216 | 216 | def instabilities(self): |
|
217 | 217 | """return the list of instabilities affecting this changeset. |
|
218 | 218 | |
|
219 | 219 | Instabilities are returned as strings. possible values are: |
|
220 | 220 | - orphan, |
|
221 | 221 | - phase-divergent, |
|
222 | 222 | - content-divergent. |
|
223 | 223 | """ |
|
224 | 224 | instabilities = [] |
|
225 | 225 | if self.orphan(): |
|
226 | 226 | instabilities.append('orphan') |
|
227 | 227 | if self.phasedivergent(): |
|
228 | 228 | instabilities.append('phase-divergent') |
|
229 | 229 | if self.contentdivergent(): |
|
230 | 230 | instabilities.append('content-divergent') |
|
231 | 231 | return instabilities |
|
232 | 232 | |
|
233 | 233 | def parents(self): |
|
234 | 234 | """return contexts for each parent changeset""" |
|
235 | 235 | return self._parents |
|
236 | 236 | |
|
237 | 237 | def p1(self): |
|
238 | 238 | return self._parents[0] |
|
239 | 239 | |
|
240 | 240 | def p2(self): |
|
241 | 241 | parents = self._parents |
|
242 | 242 | if len(parents) == 2: |
|
243 | 243 | return parents[1] |
|
244 | 244 | return self._repo[nullrev] |
|
245 | 245 | |
|
246 | 246 | def _fileinfo(self, path): |
|
247 | 247 | if r'_manifest' in self.__dict__: |
|
248 | 248 | try: |
|
249 | 249 | return self._manifest[path], self._manifest.flags(path) |
|
250 | 250 | except KeyError: |
|
251 | 251 | raise error.ManifestLookupError(self._node, path, |
|
252 | 252 | _('not found in manifest')) |
|
253 | 253 | if r'_manifestdelta' in self.__dict__ or path in self.files(): |
|
254 | 254 | if path in self._manifestdelta: |
|
255 | 255 | return (self._manifestdelta[path], |
|
256 | 256 | self._manifestdelta.flags(path)) |
|
257 | 257 | mfl = self._repo.manifestlog |
|
258 | 258 | try: |
|
259 | 259 | node, flag = mfl[self._changeset.manifest].find(path) |
|
260 | 260 | except KeyError: |
|
261 | 261 | raise error.ManifestLookupError(self._node, path, |
|
262 | 262 | _('not found in manifest')) |
|
263 | 263 | |
|
264 | 264 | return node, flag |
|
265 | 265 | |
|
266 | 266 | def filenode(self, path): |
|
267 | 267 | return self._fileinfo(path)[0] |
|
268 | 268 | |
|
269 | 269 | def flags(self, path): |
|
270 | 270 | try: |
|
271 | 271 | return self._fileinfo(path)[1] |
|
272 | 272 | except error.LookupError: |
|
273 | 273 | return '' |
|
274 | 274 | |
|
275 | 275 | @propertycache |
|
276 | 276 | def _copies(self): |
|
277 | 277 | p1copies = {} |
|
278 | 278 | p2copies = {} |
|
279 | 279 | p1 = self.p1() |
|
280 | 280 | p2 = self.p2() |
|
281 | 281 | narrowmatch = self._repo.narrowmatch() |
|
282 | 282 | for dst in self.files(): |
|
283 | 283 | if not narrowmatch(dst) or dst not in self: |
|
284 | 284 | continue |
|
285 | 285 | copied = self[dst].renamed() |
|
286 | 286 | if not copied: |
|
287 | 287 | continue |
|
288 | 288 | src, srcnode = copied |
|
289 | 289 | if src in p1 and p1[src].filenode() == srcnode: |
|
290 | 290 | p1copies[dst] = src |
|
291 | 291 | elif src in p2 and p2[src].filenode() == srcnode: |
|
292 | 292 | p2copies[dst] = src |
|
293 | 293 | return p1copies, p2copies |
|
294 | 294 | def p1copies(self): |
|
295 | 295 | return self._copies[0] |
|
296 | 296 | def p2copies(self): |
|
297 | 297 | return self._copies[1] |
|
298 | 298 | |
|
299 | 299 | def sub(self, path, allowcreate=True): |
|
300 | 300 | '''return a subrepo for the stored revision of path, never wdir()''' |
|
301 | 301 | return subrepo.subrepo(self, path, allowcreate=allowcreate) |
|
302 | 302 | |
|
303 | 303 | def nullsub(self, path, pctx): |
|
304 | 304 | return subrepo.nullsubrepo(self, path, pctx) |
|
305 | 305 | |
|
306 | 306 | def workingsub(self, path): |
|
307 | 307 | '''return a subrepo for the stored revision, or wdir if this is a wdir |
|
308 | 308 | context. |
|
309 | 309 | ''' |
|
310 | 310 | return subrepo.subrepo(self, path, allowwdir=True) |
|
311 | 311 | |
|
312 | 312 | def match(self, pats=None, include=None, exclude=None, default='glob', |
|
313 | 313 | listsubrepos=False, badfn=None): |
|
314 | 314 | r = self._repo |
|
315 | 315 | return matchmod.match(r.root, r.getcwd(), pats, |
|
316 | 316 | include, exclude, default, |
|
317 | 317 | auditor=r.nofsauditor, ctx=self, |
|
318 | 318 | listsubrepos=listsubrepos, badfn=badfn) |
|
319 | 319 | |
|
320 | 320 | def diff(self, ctx2=None, match=None, changes=None, opts=None, |
|
321 | 321 | losedatafn=None, pathfn=None, copy=None, |
|
322 | 322 | copysourcematch=None, hunksfilterfn=None): |
|
323 | 323 | """Returns a diff generator for the given contexts and matcher""" |
|
324 | 324 | if ctx2 is None: |
|
325 | 325 | ctx2 = self.p1() |
|
326 | 326 | if ctx2 is not None: |
|
327 | 327 | ctx2 = self._repo[ctx2] |
|
328 | 328 | return patch.diff(self._repo, ctx2, self, match=match, changes=changes, |
|
329 | 329 | opts=opts, losedatafn=losedatafn, pathfn=pathfn, |
|
330 | 330 | copy=copy, copysourcematch=copysourcematch, |
|
331 | 331 | hunksfilterfn=hunksfilterfn) |
|
332 | 332 | |
|
333 | 333 | def dirs(self): |
|
334 | 334 | return self._manifest.dirs() |
|
335 | 335 | |
|
336 | 336 | def hasdir(self, dir): |
|
337 | 337 | return self._manifest.hasdir(dir) |
|
338 | 338 | |
|
339 | 339 | def status(self, other=None, match=None, listignored=False, |
|
340 | 340 | listclean=False, listunknown=False, listsubrepos=False): |
|
341 | 341 | """return status of files between two nodes or node and working |
|
342 | 342 | directory. |
|
343 | 343 | |
|
344 | 344 | If other is None, compare this node with working directory. |
|
345 | 345 | |
|
346 | 346 | returns (modified, added, removed, deleted, unknown, ignored, clean) |
|
347 | 347 | """ |
|
348 | 348 | |
|
349 | 349 | ctx1 = self |
|
350 | 350 | ctx2 = self._repo[other] |
|
351 | 351 | |
|
352 | 352 | # This next code block is, admittedly, fragile logic that tests for |
|
353 | 353 | # reversing the contexts and wouldn't need to exist if it weren't for |
|
354 | 354 | # the fast (and common) code path of comparing the working directory |
|
355 | 355 | # with its first parent. |
|
356 | 356 | # |
|
357 | 357 | # What we're aiming for here is the ability to call: |
|
358 | 358 | # |
|
359 | 359 | # workingctx.status(parentctx) |
|
360 | 360 | # |
|
361 | 361 | # If we always built the manifest for each context and compared those, |
|
362 | 362 | # then we'd be done. But the special case of the above call means we |
|
363 | 363 | # just copy the manifest of the parent. |
|
364 | 364 | reversed = False |
|
365 | 365 | if (not isinstance(ctx1, changectx) |
|
366 | 366 | and isinstance(ctx2, changectx)): |
|
367 | 367 | reversed = True |
|
368 | 368 | ctx1, ctx2 = ctx2, ctx1 |
|
369 | 369 | |
|
370 | 370 | match = self._repo.narrowmatch(match) |
|
371 | 371 | match = ctx2._matchstatus(ctx1, match) |
|
372 | 372 | r = scmutil.status([], [], [], [], [], [], []) |
|
373 | 373 | r = ctx2._buildstatus(ctx1, r, match, listignored, listclean, |
|
374 | 374 | listunknown) |
|
375 | 375 | |
|
376 | 376 | if reversed: |
|
377 | 377 | # Reverse added and removed. Clear deleted, unknown and ignored as |
|
378 | 378 | # these make no sense to reverse. |
|
379 | 379 | r = scmutil.status(r.modified, r.removed, r.added, [], [], [], |
|
380 | 380 | r.clean) |
|
381 | 381 | |
|
382 | 382 | if listsubrepos: |
|
383 | 383 | for subpath, sub in scmutil.itersubrepos(ctx1, ctx2): |
|
384 | 384 | try: |
|
385 | 385 | rev2 = ctx2.subrev(subpath) |
|
386 | 386 | except KeyError: |
|
387 | 387 | # A subrepo that existed in node1 was deleted between |
|
388 | 388 | # node1 and node2 (inclusive). Thus, ctx2's substate |
|
389 | 389 | # won't contain that subpath. The best we can do is ignore it.
|
390 | 390 | rev2 = None |
|
391 | 391 | submatch = matchmod.subdirmatcher(subpath, match) |
|
392 | 392 | s = sub.status(rev2, match=submatch, ignored=listignored, |
|
393 | 393 | clean=listclean, unknown=listunknown, |
|
394 | 394 | listsubrepos=True) |
|
395 | 395 | for rfiles, sfiles in zip(r, s): |
|
396 | 396 | rfiles.extend("%s/%s" % (subpath, f) for f in sfiles) |
|
397 | 397 | |
|
398 | 398 | for l in r: |
|
399 | 399 | l.sort() |
|
400 | 400 | |
|
401 | 401 | return r |
|
402 | 402 | |
|
403 | 403 | class changectx(basectx): |
|
404 | 404 | """A changecontext object makes access to data related to a particular |
|
405 | 405 | changeset convenient. It represents a read-only context already present in |
|
406 | 406 | the repo.""" |
|
407 | 407 | def __init__(self, repo, rev, node): |
|
408 | 408 | super(changectx, self).__init__(repo) |
|
409 | 409 | self._rev = rev |
|
410 | 410 | self._node = node |
|
411 | 411 | |
|
412 | 412 | def __hash__(self): |
|
413 | 413 | try: |
|
414 | 414 | return hash(self._rev) |
|
415 | 415 | except AttributeError: |
|
416 | 416 | return id(self) |
|
417 | 417 | |
|
418 | 418 | def __nonzero__(self): |
|
419 | 419 | return self._rev != nullrev |
|
420 | 420 | |
|
421 | 421 | __bool__ = __nonzero__ |
|
422 | 422 | |
|
423 | 423 | @propertycache |
|
424 | 424 | def _changeset(self): |
|
425 | 425 | return self._repo.changelog.changelogrevision(self.rev()) |
|
426 | 426 | |
|
427 | 427 | @propertycache |
|
428 | 428 | def _manifest(self): |
|
429 | 429 | return self._manifestctx.read() |
|
430 | 430 | |
|
431 | 431 | @property |
|
432 | 432 | def _manifestctx(self): |
|
433 | 433 | return self._repo.manifestlog[self._changeset.manifest] |
|
434 | 434 | |
|
435 | 435 | @propertycache |
|
436 | 436 | def _manifestdelta(self): |
|
437 | 437 | return self._manifestctx.readdelta() |
|
438 | 438 | |
|
439 | 439 | @propertycache |
|
440 | 440 | def _parents(self): |
|
441 | 441 | repo = self._repo |
|
442 | 442 | p1, p2 = repo.changelog.parentrevs(self._rev) |
|
443 | 443 | if p2 == nullrev: |
|
444 | 444 | return [repo[p1]] |
|
445 | 445 | return [repo[p1], repo[p2]] |
|
446 | 446 | |
|
447 | 447 | def changeset(self): |
|
448 | 448 | c = self._changeset |
|
449 | 449 | return ( |
|
450 | 450 | c.manifest, |
|
451 | 451 | c.user, |
|
452 | 452 | c.date, |
|
453 | 453 | c.files, |
|
454 | 454 | c.description, |
|
455 | 455 | c.extra, |
|
456 | 456 | ) |
|
457 | 457 | def manifestnode(self): |
|
458 | 458 | return self._changeset.manifest |
|
459 | 459 | |
|
460 | 460 | def user(self): |
|
461 | 461 | return self._changeset.user |
|
462 | 462 | def date(self): |
|
463 | 463 | return self._changeset.date |
|
464 | 464 | def files(self): |
|
465 | 465 | return self._changeset.files |
|
466 | 466 | def filesmodified(self): |
|
467 | 467 | modified = set(self.files()) |
|
468 | 468 | modified.difference_update(self.filesadded()) |
|
469 | 469 | modified.difference_update(self.filesremoved()) |
|
470 | 470 | return sorted(modified) |
|
471 | 471 | def filesadded(self): |
|
472 | 472 | source = self._repo.ui.config('experimental', 'copies.read-from') |
|
473 | 473 | if (source == 'changeset-only' or |
|
474 | 474 | (source == 'compatibility' and |
|
475 | 475 | self._changeset.filesadded is not None)): |
|
476 | 476 | return self._changeset.filesadded or [] |
|
477 | 477 | |
|
478 | 478 | added = [] |
|
479 | 479 | for f in self.files(): |
|
480 | 480 | if not any(f in p for p in self.parents()): |
|
481 | 481 | added.append(f) |
|
482 | 482 | return added |
|
483 | 483 | def filesremoved(self): |
|
484 | 484 | source = self._repo.ui.config('experimental', 'copies.read-from') |
|
485 | 485 | if (source == 'changeset-only' or |
|
486 | 486 | (source == 'compatibility' and |
|
487 | 487 | self._changeset.filesremoved is not None)): |
|
488 | 488 | return self._changeset.filesremoved or [] |
|
489 | 489 | |
|
490 | 490 | removed = [] |
|
491 | 491 | for f in self.files(): |
|
492 | 492 | if f not in self: |
|
493 | 493 | removed.append(f) |
|
494 | 494 | return removed |
|
495 | 495 | |
|
496 | 496 | @propertycache |
|
497 | 497 | def _copies(self): |
|
498 | 498 | source = self._repo.ui.config('experimental', 'copies.read-from') |
|
499 | 499 | p1copies = self._changeset.p1copies |
|
500 | 500 | p2copies = self._changeset.p2copies |
|
501 | 501 | # If config says to get copy metadata only from changeset, then return |
|
502 | 502 | # that, defaulting to {} if there was no copy metadata. |
|
503 | 503 | # In compatibility mode, we return copy data from the changeset if |
|
504 | 504 | # it was recorded there, and otherwise we fall back to getting it from |
|
505 | 505 | # the filelogs (below). |
|
506 | 506 | if (source == 'changeset-only' or |
|
507 | 507 | (source == 'compatibility' and p1copies is not None)): |
|
508 | 508 | return p1copies or {}, p2copies or {} |
|
509 | 509 | |
|
510 | 510 | # Otherwise (config said to read only from filelog, or we are in |
|
511 | 511 | # compatibility mode and there is no data in the changeset), we get
|
512 | 512 | # the copy metadata from the filelogs. |
|
513 | 513 | return super(changectx, self)._copies |
|
514 | 514 | def description(self): |
|
515 | 515 | return self._changeset.description |
|
516 | 516 | def branch(self): |
|
517 | 517 | return encoding.tolocal(self._changeset.extra.get("branch")) |
|
518 | 518 | def closesbranch(self): |
|
519 | 519 | return 'close' in self._changeset.extra |
|
520 | 520 | def extra(self): |
|
521 | 521 | """Return a dict of extra information.""" |
|
522 | 522 | return self._changeset.extra |
|
523 | 523 | def tags(self): |
|
524 | 524 | """Return a list of byte tag names""" |
|
525 | 525 | return self._repo.nodetags(self._node) |
|
526 | 526 | def bookmarks(self): |
|
527 | 527 | """Return a list of byte bookmark names.""" |
|
528 | 528 | return self._repo.nodebookmarks(self._node) |
|
529 | 529 | def phase(self): |
|
530 | 530 | return self._repo._phasecache.phase(self._repo, self._rev) |
|
531 | 531 | def hidden(self): |
|
532 | 532 | return self._rev in repoview.filterrevs(self._repo, 'visible') |
|
533 | 533 | |
|
534 | 534 | def isinmemory(self): |
|
535 | 535 | return False |
|
536 | 536 | |
|
537 | 537 | def children(self): |
|
538 | 538 | """return list of changectx contexts for each child changeset. |
|
539 | 539 | |
|
540 | 540 | This returns only the immediate child changesets. Use descendants() to |
|
541 | 541 | recursively walk children. |
|
542 | 542 | """ |
|
543 | 543 | c = self._repo.changelog.children(self._node) |
|
544 | 544 | return [self._repo[x] for x in c] |
|
545 | 545 | |
|
546 | 546 | def ancestors(self): |
|
547 | 547 | for a in self._repo.changelog.ancestors([self._rev]): |
|
548 | 548 | yield self._repo[a] |
|
549 | 549 | |
|
550 | 550 | def descendants(self): |
|
551 | 551 | """Recursively yield all children of the changeset. |
|
552 | 552 | |
|
553 | 553 | For just the immediate children, use children() |
|
554 | 554 | """ |
|
555 | 555 | for d in self._repo.changelog.descendants([self._rev]): |
|
556 | 556 | yield self._repo[d] |
|
557 | 557 | |
|
558 | 558 | def filectx(self, path, fileid=None, filelog=None): |
|
559 | 559 | """get a file context from this changeset""" |
|
560 | 560 | if fileid is None: |
|
561 | 561 | fileid = self.filenode(path) |
|
562 | 562 | return filectx(self._repo, path, fileid=fileid, |
|
563 | 563 | changectx=self, filelog=filelog) |
|
564 | 564 | |
|
565 | 565 | def ancestor(self, c2, warn=False): |
|
566 | 566 | """return the "best" ancestor context of self and c2 |
|
567 | 567 | |
|
568 | 568 | If there are multiple candidates, it will show a message and check |
|
569 | 569 | merge.preferancestor configuration before falling back to the |
|
570 | 570 | revlog ancestor.""" |
|
571 | 571 | # deal with workingctxs |
|
572 | 572 | n2 = c2._node |
|
573 | 573 | if n2 is None: |
|
574 | 574 | n2 = c2._parents[0]._node |
|
575 | 575 | cahs = self._repo.changelog.commonancestorsheads(self._node, n2) |
|
576 | 576 | if not cahs: |
|
577 | 577 | anc = nullid |
|
578 | 578 | elif len(cahs) == 1: |
|
579 | 579 | anc = cahs[0] |
|
580 | 580 | else: |
|
581 | 581 | # experimental config: merge.preferancestor |
|
582 | 582 | for r in self._repo.ui.configlist('merge', 'preferancestor'): |
|
583 | 583 | try: |
|
584 | 584 | ctx = scmutil.revsymbol(self._repo, r) |
|
585 | 585 | except error.RepoLookupError: |
|
586 | 586 | continue |
|
587 | 587 | anc = ctx.node() |
|
588 | 588 | if anc in cahs: |
|
589 | 589 | break |
|
590 | 590 | else: |
|
591 | 591 | anc = self._repo.changelog.ancestor(self._node, n2) |
|
592 | 592 | if warn: |
|
593 | 593 | self._repo.ui.status( |
|
594 | 594 | (_("note: using %s as ancestor of %s and %s\n") % |
|
595 | 595 | (short(anc), short(self._node), short(n2))) + |
|
596 | 596 | ''.join(_(" alternatively, use --config " |
|
597 | 597 | "merge.preferancestor=%s\n") % |
|
598 | 598 | short(n) for n in sorted(cahs) if n != anc)) |
|
599 | 599 | return self._repo[anc] |
|
600 | 600 | |
|
601 | 601 | def isancestorof(self, other): |
|
602 | 602 | """True if this changeset is an ancestor of other""" |
|
603 | 603 | return self._repo.changelog.isancestorrev(self._rev, other._rev) |
|
604 | 604 | |
|
605 | 605 | def walk(self, match): |
|
606 | 606 | '''Generates matching file names.''' |
|
607 | 607 | |
|
608 | 608 | # Wrap match.bad method to have message with nodeid |
|
609 | 609 | def bad(fn, msg): |
|
610 | 610 | # The manifest doesn't know about subrepos, so don't complain about |
|
611 | 611 | # paths into valid subrepos. |
|
612 | 612 | if any(fn == s or fn.startswith(s + '/') |
|
613 | 613 | for s in self.substate): |
|
614 | 614 | return |
|
615 | 615 | match.bad(fn, _('no such file in rev %s') % self) |
|
616 | 616 | |
|
617 | 617 | m = matchmod.badmatch(self._repo.narrowmatch(match), bad) |
|
618 | 618 | return self._manifest.walk(m) |
|
619 | 619 | |
|
620 | 620 | def matches(self, match): |
|
621 | 621 | return self.walk(match) |
|
622 | 622 | |
|
623 | 623 | class basefilectx(object): |
|
624 | 624 | """A filecontext object represents the common logic for its children: |
|
625 | 625 | filectx: read-only access to a filerevision that is already present |
|
626 | 626 | in the repo, |
|
627 | 627 | workingfilectx: a filecontext that represents files from the working |
|
628 | 628 | directory, |
|
629 | 629 | memfilectx: a filecontext that represents files in-memory, |
|
630 | 630 | """ |
|
631 | 631 | @propertycache |
|
632 | 632 | def _filelog(self): |
|
633 | 633 | return self._repo.file(self._path) |
|
634 | 634 | |
|
635 | 635 | @propertycache |
|
636 | 636 | def _changeid(self): |
|
637 | 637 | if r'_changectx' in self.__dict__: |
|
638 | 638 | return self._changectx.rev() |
|
639 | 639 | elif r'_descendantrev' in self.__dict__: |
|
640 | 640 | # this file context was created from a revision with a known |
|
641 | 641 | # descendant, we can (lazily) correct for linkrev aliases |
|
642 | 642 | return self._adjustlinkrev(self._descendantrev) |
|
643 | 643 | else: |
|
644 | 644 | return self._filelog.linkrev(self._filerev) |
|
645 | 645 | |
|
646 | 646 | @propertycache |
|
647 | 647 | def _filenode(self): |
|
648 | 648 | if r'_fileid' in self.__dict__: |
|
649 | 649 | return self._filelog.lookup(self._fileid) |
|
650 | 650 | else: |
|
651 | 651 | return self._changectx.filenode(self._path) |
|
652 | 652 | |
|
653 | 653 | @propertycache |
|
654 | 654 | def _filerev(self): |
|
655 | 655 | return self._filelog.rev(self._filenode) |
|
656 | 656 | |
|
657 | 657 | @propertycache |
|
658 | 658 | def _repopath(self): |
|
659 | 659 | return self._path |
|
660 | 660 | |
|
661 | 661 | def __nonzero__(self): |
|
662 | 662 | try: |
|
663 | 663 | self._filenode |
|
664 | 664 | return True |
|
665 | 665 | except error.LookupError: |
|
666 | 666 | # file is missing |
|
667 | 667 | return False |
|
668 | 668 | |
|
669 | 669 | __bool__ = __nonzero__ |
|
670 | 670 | |
|
671 | 671 | def __bytes__(self): |
|
672 | 672 | try: |
|
673 | 673 | return "%s@%s" % (self.path(), self._changectx) |
|
674 | 674 | except error.LookupError: |
|
675 | 675 | return "%s@???" % self.path() |
|
676 | 676 | |
|
677 | 677 | __str__ = encoding.strmethod(__bytes__) |
|
678 | 678 | |
|
679 | 679 | def __repr__(self): |
|
680 | 680 | return r"<%s %s>" % (type(self).__name__, str(self)) |
|
681 | 681 | |
|
682 | 682 | def __hash__(self): |
|
683 | 683 | try: |
|
684 | 684 | return hash((self._path, self._filenode)) |
|
685 | 685 | except AttributeError: |
|
686 | 686 | return id(self) |
|
687 | 687 | |
|
688 | 688 | def __eq__(self, other): |
|
689 | 689 | try: |
|
690 | 690 | return (type(self) == type(other) and self._path == other._path |
|
691 | 691 | and self._filenode == other._filenode) |
|
692 | 692 | except AttributeError: |
|
693 | 693 | return False |
|
694 | 694 | |
|
695 | 695 | def __ne__(self, other): |
|
696 | 696 | return not (self == other) |
|
697 | 697 | |
|
698 | 698 | def filerev(self): |
|
699 | 699 | return self._filerev |
|
700 | 700 | def filenode(self): |
|
701 | 701 | return self._filenode |
|
702 | 702 | @propertycache |
|
703 | 703 | def _flags(self): |
|
704 | 704 | return self._changectx.flags(self._path) |
|
705 | 705 | def flags(self): |
|
706 | 706 | return self._flags |
|
707 | 707 | def filelog(self): |
|
708 | 708 | return self._filelog |
|
709 | 709 | def rev(self): |
|
710 | 710 | return self._changeid |
|
711 | 711 | def linkrev(self): |
|
712 | 712 | return self._filelog.linkrev(self._filerev) |
|
713 | 713 | def node(self): |
|
714 | 714 | return self._changectx.node() |
|
715 | 715 | def hex(self): |
|
716 | 716 | return self._changectx.hex() |
|
717 | 717 | def user(self): |
|
718 | 718 | return self._changectx.user() |
|
719 | 719 | def date(self): |
|
720 | 720 | return self._changectx.date() |
|
721 | 721 | def files(self): |
|
722 | 722 | return self._changectx.files() |
|
723 | 723 | def description(self): |
|
724 | 724 | return self._changectx.description() |
|
725 | 725 | def branch(self): |
|
726 | 726 | return self._changectx.branch() |
|
727 | 727 | def extra(self): |
|
728 | 728 | return self._changectx.extra() |
|
729 | 729 | def phase(self): |
|
730 | 730 | return self._changectx.phase() |
|
731 | 731 | def phasestr(self): |
|
732 | 732 | return self._changectx.phasestr() |
|
733 | 733 | def obsolete(self): |
|
734 | 734 | return self._changectx.obsolete() |
|
735 | 735 | def instabilities(self): |
|
736 | 736 | return self._changectx.instabilities() |
|
737 | 737 | def manifest(self): |
|
738 | 738 | return self._changectx.manifest() |
|
739 | 739 | def changectx(self): |
|
740 | 740 | return self._changectx |
|
741 | 741 | def renamed(self): |
|
742 | 742 | return self._copied |
|
743 | 743 | def copysource(self): |
|
744 | 744 | return self._copied and self._copied[0] |
|
745 | 745 | def repo(self): |
|
746 | 746 | return self._repo |
|
747 | 747 | def size(self): |
|
748 | 748 | return len(self.data()) |
|
749 | 749 | |
|
750 | 750 | def path(self): |
|
751 | 751 | return self._path |
|
752 | 752 | |
|
753 | 753 | def isbinary(self): |
|
754 | 754 | try: |
|
755 | 755 | return stringutil.binary(self.data()) |
|
756 | 756 | except IOError: |
|
757 | 757 | return False |
|
758 | 758 | def isexec(self): |
|
759 | 759 | return 'x' in self.flags() |
|
760 | 760 | def islink(self): |
|
761 | 761 | return 'l' in self.flags() |
|
762 | 762 | |
|
763 | 763 | def isabsent(self): |
|
764 | 764 | """whether this filectx represents a file not in self._changectx |
|
765 | 765 | |
|
766 | 766 | This is mainly for merge code to detect change/delete conflicts. This is |
|
767 | 767 | expected to be True for all subclasses of basectx.""" |
|
768 | 768 | return False |
|
769 | 769 | |
|
770 | 770 | _customcmp = False |
|
771 | 771 | def cmp(self, fctx): |
|
772 | 772 | """compare with other file context |
|
773 | 773 | |
|
774 | 774 | returns True if different than fctx. |
|
775 | 775 | """ |
|
776 | 776 | if fctx._customcmp: |
|
777 | 777 | return fctx.cmp(self) |
|
778 | 778 | |
|
779 | 779 | if self._filenode is None: |
|
780 | 780 | raise error.ProgrammingError( |
|
781 | 781 | 'filectx.cmp() must be reimplemented if not backed by revlog') |
|
782 | 782 | |
|
783 | 783 | if fctx._filenode is None: |
|
784 | 784 | if self._repo._encodefilterpats: |
|
785 | 785 | # can't rely on size() because wdir content may be decoded |
|
786 | 786 | return self._filelog.cmp(self._filenode, fctx.data()) |
|
787 | 787 | if self.size() - 4 == fctx.size(): |
|
788 | 788 | # size() can match: |
|
789 | 789 | # if file data starts with '\1\n', empty metadata block is |
|
790 | 790 | # prepended, which adds 4 bytes to filelog.size(). |
|
791 | 791 | return self._filelog.cmp(self._filenode, fctx.data()) |
|
792 | 792 | if self.size() == fctx.size(): |
|
793 | 793 | # size() matches: need to compare content |
|
794 | 794 | return self._filelog.cmp(self._filenode, fctx.data()) |
|
795 | 795 | |
|
796 | 796 | # size() differs |
|
797 | 797 | return True |
|
798 | 798 | |
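The `size() - 4` probe reflects how filelog metadata is framed: data that itself begins with `'\1\n'` is stored behind an empty metadata block, delimited by `'\1\n'` on each side, so the stored size grows by exactly four bytes. A worked illustration:

```python
data = b'\x01\nfile content that happens to start with the marker'
stored = b'\x01\n' + b'\x01\n' + data  # empty metadata block + payload
assert len(stored) - len(data) == 4    # the 4 extra bytes probed above
```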
|
799 | 799 | def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None): |
|
800 | 800 | """return the first ancestor of <srcrev> introducing <fnode> |
|
801 | 801 | |
|
802 | 802 | If the linkrev of the file revision does not point to an ancestor of |
|
803 | 803 | srcrev, we'll walk down the ancestors until we find one introducing |
|
804 | 804 | this file revision. |
|
805 | 805 | |
|
806 | 806 | :srcrev: the changeset revision we search ancestors from |
|
807 | 807 | :inclusive: if true, the src revision will also be checked |
|
808 | 808 | :stoprev: an optional revision to stop the walk at. If no introduction |
|
809 | 809 | of this file content could be found before this floor |
|
810 | 810 | revision, the function will return "None" and stop its
|
811 | 811 | iteration. |
|
812 | 812 | """ |
|
813 | 813 | repo = self._repo |
|
814 | 814 | cl = repo.unfiltered().changelog |
|
815 | 815 | mfl = repo.manifestlog |
|
816 | 816 | # fetch the linkrev |
|
817 | 817 | lkr = self.linkrev() |
|
818 | 818 | if srcrev == lkr: |
|
819 | 819 | return lkr |
|
820 | 820 | # hack to reuse ancestor computation when searching for renames |
|
821 | 821 | memberanc = getattr(self, '_ancestrycontext', None) |
|
822 | 822 | iteranc = None |
|
823 | 823 | if srcrev is None: |
|
824 | 824 | # wctx case, used by workingfilectx during mergecopy |
|
825 | 825 | revs = [p.rev() for p in self._repo[None].parents()] |
|
826 | 826 | inclusive = True # we skipped the real (revless) source |
|
827 | 827 | else: |
|
828 | 828 | revs = [srcrev] |
|
829 | 829 | if memberanc is None: |
|
830 | 830 | memberanc = iteranc = cl.ancestors(revs, lkr, |
|
831 | 831 | inclusive=inclusive) |
|
832 | 832 | # check if this linkrev is an ancestor of srcrev |
|
833 | 833 | if lkr not in memberanc: |
|
834 | 834 | if iteranc is None: |
|
835 | 835 | iteranc = cl.ancestors(revs, lkr, inclusive=inclusive) |
|
836 | 836 | fnode = self._filenode |
|
837 | 837 | path = self._path |
|
838 | 838 | for a in iteranc: |
|
839 | 839 | if stoprev is not None and a < stoprev: |
|
840 | 840 | return None |
|
841 | 841 | ac = cl.read(a) # get changeset data (we avoid object creation) |
|
842 | 842 | if path in ac[3]: # checking the 'files' field. |
|
843 | 843 | # The file has been touched, check if the content is |
|
844 | 844 | # similar to the one we search for. |
|
845 | 845 | if fnode == mfl[ac[0]].readfast().get(path): |
|
846 | 846 | return a |
|
847 | 847 | # In theory, we should never get out of that loop without a result. |
|
848 | 848 | # But if the manifest uses a buggy file revision (not a child of the

849 | 849 | # one it replaces) we could. Such a buggy situation will likely

850 | 850 | # result in a crash somewhere else at some point.
|
851 | 851 | return lkr |
|
852 | 852 | |
|
853 | 853 | def isintroducedafter(self, changelogrev): |
|
854 | 854 | """True if a filectx has been introduced after a given floor revision |
|
855 | 855 | """ |
|
856 | 856 | if self.linkrev() >= changelogrev: |
|
857 | 857 | return True |
|
858 | 858 | introrev = self._introrev(stoprev=changelogrev) |
|
859 | 859 | if introrev is None: |
|
860 | 860 | return False |
|
861 | 861 | return introrev >= changelogrev |
|
862 | 862 | |
|
863 | 863 | def introrev(self): |
|
864 | 864 | """return the rev of the changeset which introduced this file revision |
|
865 | 865 | |
|
866 | 866 | This method is different from linkrev because it takes into account the
|
867 | 867 | changeset the filectx was created from. It ensures the returned |
|
868 | 868 | revision is one of its ancestors. This prevents bugs from |
|
869 | 869 | 'linkrev-shadowing' when a file revision is used by multiple |
|
870 | 870 | changesets. |
|
871 | 871 | """ |
|
872 | 872 | return self._introrev() |
|
873 | 873 | |
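A sketch of the distinction (file name hypothetical): `linkrev()` trusts the pointer stored in the filelog, while `introrev()` verifies it against the changeset this filectx was reached from:

```python
fctx = repo[b'tip'][b'README']
lkr = fctx.linkrev()     # may be "shadowed" by an unrelated changeset
intro = fctx.introrev()  # guaranteed to be an ancestor of fctx's changeset
assert intro in repo.changelog.ancestors([fctx.rev()], inclusive=True)
```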
|
874 | 874 | def _introrev(self, stoprev=None): |
|
875 | 875 | """ |
|
876 | 876 | Same as `introrev` but, with an extra argument to limit changelog |
|
877 | 877 | iteration range in some internal use cases.
|
878 | 878 | |
|
879 | 879 | If `stoprev` is set, the `introrev` will not be searched past that |
|
880 | 880 | `stoprev` revision and "None" might be returned. This is useful to |
|
881 | 881 | limit the iteration range. |
|
882 | 882 | """ |
|
883 | 883 | toprev = None |
|
884 | 884 | attrs = vars(self) |
|
885 | 885 | if r'_changeid' in attrs: |
|
886 | 886 | # We have a cached value already |
|
887 | 887 | toprev = self._changeid |
|
888 | 888 | elif r'_changectx' in attrs: |
|
889 | 889 | # We know which changelog entry we are coming from |
|
890 | 890 | toprev = self._changectx.rev() |
|
891 | 891 | |
|
892 | 892 | if toprev is not None: |
|
893 | 893 | return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev) |
|
894 | 894 | elif r'_descendantrev' in attrs: |
|
895 | 895 | introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev) |
|
896 | 896 | # be nice and cache the result of the computation |
|
897 | 897 | if introrev is not None: |
|
898 | 898 | self._changeid = introrev |
|
899 | 899 | return introrev |
|
900 | 900 | else: |
|
901 | 901 | return self.linkrev() |
|
902 | 902 | |
|
903 | 903 | def introfilectx(self): |
|
904 | 904 | """Return filectx having identical contents, but pointing to the |
|
905 | 905 | changeset revision where this filectx was introduced""" |
|
906 | 906 | introrev = self.introrev() |
|
907 | 907 | if self.rev() == introrev: |
|
908 | 908 | return self |
|
909 | 909 | return self.filectx(self.filenode(), changeid=introrev) |
|
910 | 910 | |
|
911 | 911 | def _parentfilectx(self, path, fileid, filelog): |
|
912 | 912 | """create parent filectx keeping ancestry info for _adjustlinkrev()""" |
|
913 | 913 | fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog) |
|
914 | 914 | if r'_changeid' in vars(self) or r'_changectx' in vars(self): |
|
915 | 915 | # If self is associated with a changeset (probably explicitly |
|
916 | 916 | # fed), ensure the created filectx is associated with a |
|
917 | 917 | # changeset that is an ancestor of self.changectx. |
|
918 | 918 | # This lets us later use _adjustlinkrev to get a correct link. |
|
919 | 919 | fctx._descendantrev = self.rev() |
|
920 | 920 | fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) |
|
921 | 921 | elif r'_descendantrev' in vars(self): |
|
922 | 922 | # Otherwise propagate _descendantrev if we have one associated. |
|
923 | 923 | fctx._descendantrev = self._descendantrev |
|
924 | 924 | fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) |
|
925 | 925 | return fctx |
|
926 | 926 | |
|
927 | 927 | def parents(self): |
|
928 | 928 | _path = self._path |
|
929 | 929 | fl = self._filelog |
|
930 | 930 | parents = self._filelog.parents(self._filenode) |
|
931 | 931 | pl = [(_path, node, fl) for node in parents if node != nullid] |
|
932 | 932 | |
|
933 | 933 | r = fl.renamed(self._filenode) |
|
934 | 934 | if r: |
|
935 | 935 | # - In the simple rename case, both parents are nullid and pl is empty.

936 | 936 | # - In case of merge, only one of the parents is nullid and should

937 | 937 | # be replaced with the rename information. This parent is -always-

938 | 938 | # the first one.

939 | 939 | #

940 | 940 | # As nullid parents have already been filtered out in the previous list

941 | 941 | # comprehension, inserting at index 0 will always result in replacing

942 | 942 | # the first nullid parent with the rename information.
|
943 | 943 | pl.insert(0, (r[0], r[1], self._repo.file(r[0]))) |
|
944 | 944 | |
|
945 | 945 | return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl] |
|
946 | 946 | |
|
947 | 947 | def p1(self): |
|
948 | 948 | return self.parents()[0] |
|
949 | 949 | |
|
950 | 950 | def p2(self): |
|
951 | 951 | p = self.parents() |
|
952 | 952 | if len(p) == 2: |
|
953 | 953 | return p[1] |
|
954 | 954 | return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog) |
|
955 | 955 | |
|
956 | 956 | def annotate(self, follow=False, skiprevs=None, diffopts=None): |
|
957 | 957 | """Returns a list of annotateline objects for each line in the file |
|
958 | 958 | |
|
959 | 959 | - line.fctx is the filectx of the node where that line was last changed |
|
960 | 960 | - line.lineno is the line number at the first appearance in the managed |
|
961 | 961 | file |
|
962 | 962 | - line.text is the data on that line (including newline character) |
|
963 | 963 | """ |
|
964 | 964 | getlog = util.lrucachefunc(lambda x: self._repo.file(x)) |
|
965 | 965 | |
|
966 | 966 | def parents(f): |
|
967 | 967 | # Cut _descendantrev here to mitigate the penalty of lazy linkrev |
|
968 | 968 | # adjustment. Otherwise, p._adjustlinkrev() would walk changelog |
|
969 | 969 | # from the topmost introrev (= srcrev) down to p.linkrev() if it |
|
970 | 970 | # isn't an ancestor of the srcrev. |
|
971 | 971 | f._changeid |
|
972 | 972 | pl = f.parents() |
|
973 | 973 | |
|
974 | 974 | # Don't return renamed parents if we aren't following. |
|
975 | 975 | if not follow: |
|
976 | 976 | pl = [p for p in pl if p.path() == f.path()] |
|
977 | 977 | |
|
978 | 978 | # renamed filectx won't have a filelog yet, so set it |
|
979 | 979 | # from the cache to save time |
|
980 | 980 | for p in pl: |
|
981 | 981 | if r'_filelog' not in p.__dict__:
|
982 | 982 | p._filelog = getlog(p.path()) |
|
983 | 983 | |
|
984 | 984 | return pl |
|
985 | 985 | |
|
986 | 986 | # use linkrev to find the first changeset where self appeared |
|
987 | 987 | base = self.introfilectx() |
|
988 | 988 | if getattr(base, '_ancestrycontext', None) is None: |
|
989 | 989 | cl = self._repo.changelog |
|
990 | 990 | if base.rev() is None: |
|
991 | 991 | # wctx is not inclusive, but works because _ancestrycontext |
|
992 | 992 | # is used to test filelog revisions |
|
993 | 993 | ac = cl.ancestors([p.rev() for p in base.parents()], |
|
994 | 994 | inclusive=True) |
|
995 | 995 | else: |
|
996 | 996 | ac = cl.ancestors([base.rev()], inclusive=True) |
|
997 | 997 | base._ancestrycontext = ac |
|
998 | 998 | |
|
999 | 999 | return dagop.annotate(base, parents, skiprevs=skiprevs, |
|
1000 | 1000 | diffopts=diffopts) |
|
1001 | 1001 | |
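Typical use of `annotate()` per the docstring above (file name hypothetical):

```python
fctx = repo[b'tip'][b'README']
for line in fctx.annotate(follow=True):
    # line.fctx  : filectx where the line was last changed
    # line.lineno: line number at its first appearance
    # line.text  : line content, newline included
    repo.ui.write(b'%d: %s' % (line.fctx.rev(), line.text))
```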
|
1002 | 1002 | def ancestors(self, followfirst=False): |
|
1003 | 1003 | visit = {} |
|
1004 | 1004 | c = self |
|
1005 | 1005 | if followfirst: |
|
1006 | 1006 | cut = 1 |
|
1007 | 1007 | else: |
|
1008 | 1008 | cut = None |
|
1009 | 1009 | |
|
1010 | 1010 | while True: |
|
1011 | 1011 | for parent in c.parents()[:cut]: |
|
1012 | 1012 | visit[(parent.linkrev(), parent.filenode())] = parent |
|
1013 | 1013 | if not visit: |
|
1014 | 1014 | break |
|
1015 | 1015 | c = visit.pop(max(visit)) |
|
1016 | 1016 | yield c |
|
1017 | 1017 | |
|
1018 | 1018 | def decodeddata(self): |
|
1019 | 1019 | """Returns `data()` after running repository decoding filters. |
|
1020 | 1020 | |
|
1021 | 1021 | This is often equivalent to how the data would be expressed on disk. |
|
1022 | 1022 | """ |
|
1023 | 1023 | return self._repo.wwritedata(self.path(), self.data()) |
|
1024 | 1024 | |
|
1025 | 1025 | class filectx(basefilectx): |
|
1026 | 1026 | """A filecontext object makes access to data related to a particular |
|
1027 | 1027 | filerevision convenient.""" |
|
1028 | 1028 | def __init__(self, repo, path, changeid=None, fileid=None, |
|
1029 | 1029 | filelog=None, changectx=None): |
|
1030 | 1030 | """changeid must be a revision number, if specified. |
|
1031 | 1031 | fileid can be a file revision or node.""" |
|
1032 | 1032 | self._repo = repo |
|
1033 | 1033 | self._path = path |
|
1034 | 1034 | |
|
1035 | 1035 | assert (changeid is not None |
|
1036 | 1036 | or fileid is not None |
|
1037 | 1037 | or changectx is not None), ( |
|
1038 | 1038 | "bad args: changeid=%r, fileid=%r, changectx=%r" |
|
1039 | 1039 | % (changeid, fileid, changectx)) |
|
1040 | 1040 | |
|
1041 | 1041 | if filelog is not None: |
|
1042 | 1042 | self._filelog = filelog |
|
1043 | 1043 | |
|
1044 | 1044 | if changeid is not None: |
|
1045 | 1045 | self._changeid = changeid |
|
1046 | 1046 | if changectx is not None: |
|
1047 | 1047 | self._changectx = changectx |
|
1048 | 1048 | if fileid is not None: |
|
1049 | 1049 | self._fileid = fileid |
|
1050 | 1050 | |
|
1051 | 1051 | @propertycache |
|
1052 | 1052 | def _changectx(self): |
|
1053 | 1053 | try: |
|
1054 | 1054 | return self._repo[self._changeid] |
|
1055 | 1055 | except error.FilteredRepoLookupError: |
|
1056 | 1056 | # Linkrev may point to any revision in the repository. When the

1057 | 1057 | # repository is filtered this may lead to `filectx` trying to build

1058 | 1058 | # `changectx` for a filtered revision. In such a case we fall back to

1059 | 1059 | # creating `changectx` on the unfiltered version of the repository.

1060 | 1060 | # This fallback should not be an issue because `changectx` built from

1061 | 1061 | # `filectx` are not used in complex operations that care about

1062 | 1062 | # filtering.

1063 | 1063 | #

1064 | 1064 | # This fallback is a cheap and dirty fix that prevents several

1065 | 1065 | # crashes. It does not ensure the behavior is correct. However the

1066 | 1066 | # behavior was not correct before filtering either and "incorrect

1067 | 1067 | # behavior" is seen as better than "crash".

1068 | 1068 | #

1069 | 1069 | # Linkrevs have several serious troubles with filtering that are

1070 | 1070 | # complicated to solve. Proper handling of the issue here should be

1071 | 1071 | # considered when solutions to the linkrev issue are on the table.
|
1072 | 1072 | return self._repo.unfiltered()[self._changeid] |
|
1073 | 1073 | |
|
1074 | 1074 | def filectx(self, fileid, changeid=None): |
|
1075 | 1075 | '''opens an arbitrary revision of the file without |
|
1076 | 1076 | opening a new filelog''' |
|
1077 | 1077 | return filectx(self._repo, self._path, fileid=fileid, |
|
1078 | 1078 | filelog=self._filelog, changeid=changeid) |
|
1079 | 1079 | |
|
1080 | 1080 | def rawdata(self): |
|
1081 | 1081 | return self._filelog.revision(self._filenode, raw=True) |
|
1082 | 1082 | |
|
1083 | 1083 | def rawflags(self): |
|
1084 | 1084 | """low-level revlog flags""" |
|
1085 | 1085 | return self._filelog.flags(self._filerev) |
|
1086 | 1086 | |
|
1087 | 1087 | def data(self): |
|
1088 | 1088 | try: |
|
1089 | 1089 | return self._filelog.read(self._filenode) |
|
1090 | 1090 | except error.CensoredNodeError: |
|
1091 | 1091 | if self._repo.ui.config("censor", "policy") == "ignore": |
|
1092 | 1092 | return "" |
|
1093 | 1093 | raise error.Abort(_("censored node: %s") % short(self._filenode), |
|
1094 | 1094 | hint=_("set censor.policy to ignore errors")) |
|
1095 | 1095 | |
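A sketch of the censorship policy handled above (file name hypothetical): with `censor.policy=ignore`, reading a censored node yields empty content instead of aborting:

```python
repo.ui.setconfig(b'censor', b'policy', b'ignore')
data = repo[b'tip'][b'secrets.txt'].data()  # empty if the node is censored
```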
|
1096 | 1096 | def size(self): |
|
1097 | 1097 | return self._filelog.size(self._filerev) |
|
1098 | 1098 | |
|
1099 | 1099 | @propertycache |
|
1100 | 1100 | def _copied(self): |
|
1101 | 1101 | """check if file was actually renamed in this changeset revision |
|
1102 | 1102 | |
|
1103 | 1103 | If a rename is logged in the file revision, we report a copy for the changeset

1104 | 1104 | only if the file revision's linkrev points back to the changeset in question

1105 | 1105 | or both changeset parents contain different file revisions.
|
1106 | 1106 | """ |
|
1107 | 1107 | |
|
1108 | 1108 | renamed = self._filelog.renamed(self._filenode) |
|
1109 | 1109 | if not renamed: |
|
1110 | 1110 | return None |
|
1111 | 1111 | |
|
1112 | 1112 | if self.rev() == self.linkrev(): |
|
1113 | 1113 | return renamed |
|
1114 | 1114 | |
|
1115 | 1115 | name = self.path() |
|
1116 | 1116 | fnode = self._filenode |
|
1117 | 1117 | for p in self._changectx.parents(): |
|
1118 | 1118 | try: |
|
1119 | 1119 | if fnode == p.filenode(name): |
|
1120 | 1120 | return None |
|
1121 | 1121 | except error.LookupError: |
|
1122 | 1122 | pass |
|
1123 | 1123 | return renamed |
|
1124 | 1124 | |
|
1125 | 1125 | def children(self): |
|
1126 | 1126 | # hard for renames |
|
1127 | 1127 | c = self._filelog.children(self._filenode) |
|
1128 | 1128 | return [filectx(self._repo, self._path, fileid=x, |
|
1129 | 1129 | filelog=self._filelog) for x in c] |
|
1130 | 1130 | |
|
1131 | 1131 | class committablectx(basectx): |
|
1132 | 1132 | """A committablectx object provides common functionality for a context that |
|
1133 | 1133 | wants the ability to commit, e.g. workingctx or memctx.""" |
|
1134 | 1134 | def __init__(self, repo, text="", user=None, date=None, extra=None, |
|
1135 | 1135 | changes=None, branch=None): |
|
1136 | 1136 | super(committablectx, self).__init__(repo) |
|
1137 | 1137 | self._rev = None |
|
1138 | 1138 | self._node = None |
|
1139 | 1139 | self._text = text |
|
1140 | 1140 | if date: |
|
1141 | 1141 | self._date = dateutil.parsedate(date) |
|
1142 | 1142 | if user: |
|
1143 | 1143 | self._user = user |
|
1144 | 1144 | if changes: |
|
1145 | 1145 | self._status = changes |
|
1146 | 1146 | |
|
1147 | 1147 | self._extra = {} |
|
1148 | 1148 | if extra: |
|
1149 | 1149 | self._extra = extra.copy() |
|
1150 | 1150 | if branch is not None: |
|
1151 | 1151 | self._extra['branch'] = encoding.fromlocal(branch) |
|
1152 | 1152 | if not self._extra.get('branch'): |
|
1153 | 1153 | self._extra['branch'] = 'default' |
|
1154 | 1154 | |
|
1155 | 1155 | def __bytes__(self): |
|
1156 | 1156 | return bytes(self._parents[0]) + "+" |
|
1157 | 1157 | |
|
1158 | 1158 | __str__ = encoding.strmethod(__bytes__) |
|
1159 | 1159 | |
|
1160 | 1160 | def __nonzero__(self): |
|
1161 | 1161 | return True |
|
1162 | 1162 | |
|
1163 | 1163 | __bool__ = __nonzero__ |
|
1164 | 1164 | |
|
1165 | 1165 | @propertycache |
|
1166 | 1166 | def _status(self): |
|
1167 | 1167 | return self._repo.status() |
|
1168 | 1168 | |
|
1169 | 1169 | @propertycache |
|
1170 | 1170 | def _user(self): |
|
1171 | 1171 | return self._repo.ui.username() |
|
1172 | 1172 | |
|
1173 | 1173 | @propertycache |
|
1174 | 1174 | def _date(self): |
|
1175 | 1175 | ui = self._repo.ui |
|
1176 | 1176 | date = ui.configdate('devel', 'default-date') |
|
1177 | 1177 | if date is None: |
|
1178 | 1178 | date = dateutil.makedate() |
|
1179 | 1179 | return date |
|
1180 | 1180 | |
|
1181 | 1181 | def subrev(self, subpath): |
|
1182 | 1182 | return None |
|
1183 | 1183 | |
|
1184 | 1184 | def manifestnode(self): |
|
1185 | 1185 | return None |
|
1186 | 1186 | def user(self): |
|
1187 | 1187 | return self._user or self._repo.ui.username() |
|
1188 | 1188 | def date(self): |
|
1189 | 1189 | return self._date |
|
1190 | 1190 | def description(self): |
|
1191 | 1191 | return self._text |
|
1192 | 1192 | def files(self): |
|
1193 | 1193 | return sorted(self._status.modified + self._status.added + |
|
1194 | 1194 | self._status.removed) |
|
1195 | 1195 | def modified(self): |
|
1196 | 1196 | return self._status.modified |
|
1197 | 1197 | def added(self): |
|
1198 | 1198 | return self._status.added |
|
1199 | 1199 | def removed(self): |
|
1200 | 1200 | return self._status.removed |
|
1201 | 1201 | def deleted(self): |
|
1202 | 1202 | return self._status.deleted |
|
1203 | 1203 | filesmodified = modified |
|
1204 | 1204 | filesadded = added |
|
1205 | 1205 | filesremoved = removed |
|
1206 | 1206 | |
|
1207 | 1207 | def branch(self): |
|
1208 | 1208 | return encoding.tolocal(self._extra['branch']) |
|
1209 | 1209 | def closesbranch(self): |
|
1210 | 1210 | return 'close' in self._extra |
|
1211 | 1211 | def extra(self): |
|
1212 | 1212 | return self._extra |
|
1213 | 1213 | |
|
1214 | 1214 | def isinmemory(self): |
|
1215 | 1215 | return False |
|
1216 | 1216 | |
|
1217 | 1217 | def tags(self): |
|
1218 | 1218 | return [] |
|
1219 | 1219 | |
|
1220 | 1220 | def bookmarks(self): |
|
1221 | 1221 | b = [] |
|
1222 | 1222 | for p in self.parents(): |
|
1223 | 1223 | b.extend(p.bookmarks()) |
|
1224 | 1224 | return b |
|
1225 | 1225 | |
|
1226 | 1226 | def phase(self): |
|
1227 | 1227 | phase = phases.draft # default phase to draft |
|
1228 | 1228 | for p in self.parents(): |
|
1229 | 1229 | phase = max(phase, p.phase()) |
|
1230 | 1230 | return phase |
|
1231 | 1231 | |
|
1232 | 1232 | def hidden(self): |
|
1233 | 1233 | return False |
|
1234 | 1234 | |
|
1235 | 1235 | def children(self): |
|
1236 | 1236 | return [] |
|
1237 | 1237 | |
|
1238 | 1238 | def ancestor(self, c2): |
|
1239 | 1239 | """return the "best" ancestor context of self and c2""" |
|
1240 | 1240 | return self._parents[0].ancestor(c2) # punt on two parents for now |
|
1241 | 1241 | |
|
1242 | 1242 | def ancestors(self): |
|
1243 | 1243 | for p in self._parents: |
|
1244 | 1244 | yield p |
|
1245 | 1245 | for a in self._repo.changelog.ancestors( |
|
1246 | 1246 | [p.rev() for p in self._parents]): |
|
1247 | 1247 | yield self._repo[a] |
|
1248 | 1248 | |
|
1249 | 1249 | def markcommitted(self, node): |
|
1250 | 1250 | """Perform post-commit cleanup necessary after committing this ctx |
|
1251 | 1251 | |
|
1252 | 1252 | Specifically, this updates backing stores this working context |
|
1253 | 1253 | wraps to reflect the fact that the changes reflected by this |
|
1254 | 1254 | workingctx have been committed. For example, it marks |
|
1255 | 1255 | modified and added files as normal in the dirstate. |
|
1256 | 1256 | |
|
1257 | 1257 | """ |
|
1258 | 1258 | |
|
1259 | 1259 | def dirty(self, missing=False, merge=True, branch=True): |
|
1260 | 1260 | return False |
|
1261 | 1261 | |
|
1262 | 1262 | class workingctx(committablectx): |
|
1263 | 1263 | """A workingctx object makes access to data related to |
|
1264 | 1264 | the current working directory convenient. |
|
1265 | 1265 | date - any valid date string or (unixtime, offset), or None. |
|
1266 | 1266 | user - username string, or None. |
|
1267 | 1267 | extra - a dictionary of extra values, or None. |
|
1268 | 1268 | changes - a list of file lists as returned by localrepo.status() |
|
1269 | 1269 | or None to use the repository status. |
|
1270 | 1270 | """ |
|
1271 | 1271 | def __init__(self, repo, text="", user=None, date=None, extra=None, |
|
1272 | 1272 | changes=None): |
|
1273 | 1273 | branch = None |
|
1274 | 1274 | if not extra or 'branch' not in extra: |
|
1275 | 1275 | try: |
|
1276 | 1276 | branch = repo.dirstate.branch() |
|
1277 | 1277 | except UnicodeDecodeError: |
|
1278 | 1278 | raise error.Abort(_('branch name not in UTF-8!')) |
|
1279 | 1279 | super(workingctx, self).__init__(repo, text, user, date, extra, changes, |
|
1280 | 1280 | branch=branch) |
|
1281 | 1281 | |
|
1282 | 1282 | def __iter__(self): |
|
1283 | 1283 | d = self._repo.dirstate |
|
1284 | 1284 | for f in d: |
|
1285 | 1285 | if d[f] != 'r': |
|
1286 | 1286 | yield f |
|
1287 | 1287 | |
|
1288 | 1288 | def __contains__(self, key): |
|
1289 | 1289 | return self._repo.dirstate[key] not in "?r" |
|
1290 | 1290 | |
|
1291 | 1291 | def hex(self): |
|
1292 | 1292 | return wdirhex |
|
1293 | 1293 | |
|
1294 | 1294 | @propertycache |
|
1295 | 1295 | def _parents(self): |
|
1296 | 1296 | p = self._repo.dirstate.parents() |
|
1297 | 1297 | if p[1] == nullid: |
|
1298 | 1298 | p = p[:-1] |
|
1299 | 1299 | # use unfiltered repo to delay/avoid loading obsmarkers |
|
1300 | 1300 | unfi = self._repo.unfiltered() |
|
1301 | 1301 | return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p] |
|
1302 | 1302 | |
|
1303 | 1303 | def _fileinfo(self, path): |
|
1304 | 1304 | # populate __dict__['_manifest'] as workingctx has no _manifestdelta |
|
1305 | 1305 | self._manifest |
|
1306 | 1306 | return super(workingctx, self)._fileinfo(path) |
|
1307 | 1307 | |
|
1308 | 1308 | def _buildflagfunc(self): |
|
1309 | 1309 | # Create a fallback function for getting file flags when the |
|
1310 | 1310 | # filesystem doesn't support them |
|
1311 | 1311 | |
|
1312 | 1312 | copiesget = self._repo.dirstate.copies().get |
|
1313 | 1313 | parents = self.parents() |
|
1314 | 1314 | if len(parents) < 2: |
|
1315 | 1315 | # when we have one parent, it's easy: copy from parent |
|
1316 | 1316 | man = parents[0].manifest() |
|
1317 | 1317 | def func(f): |
|
1318 | 1318 | f = copiesget(f, f) |
|
1319 | 1319 | return man.flags(f) |
|
1320 | 1320 | else: |
|
1321 | 1321 | # merges are tricky: we try to reconstruct the unstored |
|
1322 | 1322 | # result from the merge (issue1802) |
|
1323 | 1323 | p1, p2 = parents |
|
1324 | 1324 | pa = p1.ancestor(p2) |
|
1325 | 1325 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() |
|
1326 | 1326 | |
|
1327 | 1327 | def func(f): |
|
1328 | 1328 | f = copiesget(f, f) # may be wrong for merges with copies |
|
1329 | 1329 | fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) |
|
1330 | 1330 | if fl1 == fl2: |
|
1331 | 1331 | return fl1 |
|
1332 | 1332 | if fl1 == fla: |
|
1333 | 1333 | return fl2 |
|
1334 | 1334 | if fl2 == fla: |
|
1335 | 1335 | return fl1 |
|
1336 | 1336 | return '' # punt for conflicts |
|
1337 | 1337 | |
|
1338 | 1338 | return func |
|
1339 | 1339 | |
|
1340 | 1340 | @propertycache |
|
1341 | 1341 | def _flagfunc(self): |
|
1342 | 1342 | return self._repo.dirstate.flagfunc(self._buildflagfunc) |
|
1343 | 1343 | |
|
1344 | 1344 | def flags(self, path): |
|
1345 | 1345 | if r'_manifest' in self.__dict__: |
|
1346 | 1346 | try: |
|
1347 | 1347 | return self._manifest.flags(path) |
|
1348 | 1348 | except KeyError: |
|
1349 | 1349 | return '' |
|
1350 | 1350 | |
|
1351 | 1351 | try: |
|
1352 | 1352 | return self._flagfunc(path) |
|
1353 | 1353 | except OSError: |
|
1354 | 1354 | return '' |
|
1355 | 1355 | |
|
1356 | 1356 | def filectx(self, path, filelog=None): |
|
1357 | 1357 | """get a file context from the working directory""" |
|
1358 | 1358 | return workingfilectx(self._repo, path, workingctx=self, |
|
1359 | 1359 | filelog=filelog) |
|
1360 | 1360 | |
|
1361 | 1361 | def dirty(self, missing=False, merge=True, branch=True): |
|
1362 | 1362 | "check whether a working directory is modified" |
|
1363 | 1363 | # check subrepos first |
|
1364 | 1364 | for s in sorted(self.substate): |
|
1365 | 1365 | if self.sub(s).dirty(missing=missing): |
|
1366 | 1366 | return True |
|
1367 | 1367 | # check current working dir |
|
1368 | 1368 | return ((merge and self.p2()) or |
|
1369 | 1369 | (branch and self.branch() != self.p1().branch()) or |
|
1370 | 1370 | self.modified() or self.added() or self.removed() or |
|
1371 | 1371 | (missing and self.deleted())) |
|
1372 | 1372 | |
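A sketch of the dirtiness check (no assumptions beyond the working context itself):

```python
wctx = repo[None]  # working directory context
# True on an uncommitted merge, a branch change relative to p1, or any
# modified/added/removed files; missing=True also counts deleted files.
if wctx.dirty(missing=True):
    repo.ui.write(b'uncommitted changes present\n')
```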
|
1373 | 1373 | def add(self, list, prefix=""): |
|
1374 | 1374 | with self._repo.wlock(): |
|
1375 | 1375 | ui, ds = self._repo.ui, self._repo.dirstate |
|
1376 | 1376 | uipath = lambda f: ds.pathto(pathutil.join(prefix, f)) |
|
1377 | 1377 | rejected = [] |
|
1378 | 1378 | lstat = self._repo.wvfs.lstat |
|
1379 | 1379 | for f in list: |
|
1380 | 1380 | # ds.pathto() returns an absolute file when this is invoked from |
|
1381 | 1381 | # the keyword extension. That gets flagged as non-portable on |
|
1382 | 1382 | # Windows, since it contains the drive letter and colon. |
|
1383 | 1383 | scmutil.checkportable(ui, os.path.join(prefix, f)) |
|
1384 | 1384 | try: |
|
1385 | 1385 | st = lstat(f) |
|
1386 | 1386 | except OSError: |
|
1387 | 1387 | ui.warn(_("%s does not exist!\n") % uipath(f)) |
|
1388 | 1388 | rejected.append(f) |
|
1389 | 1389 | continue |
|
1390 | 1390 | limit = ui.configbytes('ui', 'large-file-limit') |
|
1391 | 1391 | if limit != 0 and st.st_size > limit: |
|
1392 | 1392 | ui.warn(_("%s: up to %d MB of RAM may be required " |
|
1393 | 1393 | "to manage this file\n" |
|
1394 | 1394 | "(use 'hg revert %s' to cancel the " |
|
1395 | 1395 | "pending addition)\n") |
|
1396 | 1396 | % (f, 3 * st.st_size // 1000000, uipath(f))) |
|
1397 | 1397 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): |
|
1398 | 1398 | ui.warn(_("%s not added: only files and symlinks " |
|
1399 | 1399 | "supported currently\n") % uipath(f)) |
|
1400 | 1400 | rejected.append(f) |
|
1401 | 1401 | elif ds[f] in 'amn': |
|
1402 | 1402 | ui.warn(_("%s already tracked!\n") % uipath(f)) |
|
1403 | 1403 | elif ds[f] == 'r': |
|
1404 | 1404 | ds.normallookup(f) |
|
1405 | 1405 | else: |
|
1406 | 1406 | ds.add(f) |
|
1407 | 1407 | return rejected |
|
1408 | 1408 | |
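The warning threshold above comes from the `ui.large-file-limit` knob read via `configbytes` (0 disables the check). A sketch of raising it before a bulk add (file name hypothetical; `configbytes` understands unit suffixes):

```python
repo.ui.setconfig(b'ui', b'large-file-limit', b'50MB')
rejected = repo[None].add([b'assets/big.bin'])
```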
|
1409 | 1409 | def forget(self, files, prefix=""): |
|
1410 | 1410 | with self._repo.wlock(): |
|
1411 | 1411 | ds = self._repo.dirstate |
|
1412 | 1412 | uipath = lambda f: ds.pathto(pathutil.join(prefix, f)) |
|
1413 | 1413 | rejected = [] |
|
1414 | 1414 | for f in files: |
|
1415 | 1415 | if f not in ds: |
|
1416 | 1416 | self._repo.ui.warn(_("%s not tracked!\n") % uipath(f)) |
|
1417 | 1417 | rejected.append(f) |
|
1418 | 1418 | elif ds[f] != 'a': |
|
1419 | 1419 | ds.remove(f) |
|
1420 | 1420 | else: |
|
1421 | 1421 | ds.drop(f) |
|
1422 | 1422 | return rejected |
|
1423 | 1423 | |
|
1424 | 1424 | def copy(self, source, dest): |
|
1425 | 1425 | try: |
|
1426 | 1426 | st = self._repo.wvfs.lstat(dest) |
|
1427 | 1427 | except OSError as err: |
|
1428 | 1428 | if err.errno != errno.ENOENT: |
|
1429 | 1429 | raise |
|
1430 | 1430 | self._repo.ui.warn(_("%s does not exist!\n") |
|
1431 | 1431 | % self._repo.dirstate.pathto(dest)) |
|
1432 | 1432 | return |
|
1433 | 1433 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): |
|
1434 | 1434 | self._repo.ui.warn(_("copy failed: %s is not a file or a " |
|
1435 | 1435 | "symbolic link\n") |
|
1436 | 1436 | % self._repo.dirstate.pathto(dest)) |
|
1437 | 1437 | else: |
|
1438 | 1438 | with self._repo.wlock(): |
|
1439 | 1439 | ds = self._repo.dirstate |
|
1440 | 1440 | if ds[dest] in '?': |
|
1441 | 1441 | ds.add(dest) |
|
1442 | 1442 | elif ds[dest] in 'r': |
|
1443 | 1443 | ds.normallookup(dest) |
|
1444 | 1444 | ds.copy(source, dest) |
|
1445 | 1445 | |
|
1446 | 1446 | def match(self, pats=None, include=None, exclude=None, default='glob', |
|
1447 | 1447 | listsubrepos=False, badfn=None): |
|
1448 | 1448 | r = self._repo |
|
1449 | 1449 | |
|
1450 | 1450 | # Only a case-insensitive filesystem needs magic to translate user input
|
1451 | 1451 | # to actual case in the filesystem. |
|
1452 | 1452 | icasefs = not util.fscasesensitive(r.root) |
|
1453 | 1453 | return matchmod.match(r.root, r.getcwd(), pats, include, exclude, |
|
1454 | 1454 | default, auditor=r.auditor, ctx=self, |
|
1455 | 1455 | listsubrepos=listsubrepos, badfn=badfn, |
|
1456 | 1456 | icasefs=icasefs) |
|
1457 | 1457 | |
|
1458 | 1458 | def _filtersuspectsymlink(self, files): |
|
1459 | 1459 | if not files or self._repo.dirstate._checklink: |
|
1460 | 1460 | return files |
|
1461 | 1461 | |
|
1462 | 1462 | # Symlink placeholders may get non-symlink-like contents |
|
1463 | 1463 | # via user error or dereferencing by NFS or Samba servers, |
|
1464 | 1464 | # so we filter out any placeholders that don't look like a |
|
1465 | 1465 | # symlink |
|
1466 | 1466 | sane = [] |
|
1467 | 1467 | for f in files: |
|
1468 | 1468 | if self.flags(f) == 'l': |
|
1469 | 1469 | d = self[f].data() |
|
1470 | 1470 | if (d == '' or len(d) >= 1024 or '\n' in d |
|
1471 | 1471 | or stringutil.binary(d)): |
|
1472 | 1472 | self._repo.ui.debug('ignoring suspect symlink placeholder' |
|
1473 | 1473 | ' "%s"\n' % f) |
|
1474 | 1474 | continue |
|
1475 | 1475 | sane.append(f) |
|
1476 | 1476 | return sane |
|
1477 | 1477 | |
|
1478 | 1478 | def _checklookup(self, files): |
|
1479 | 1479 | # check for any possibly clean files |
|
1480 | 1480 | if not files: |
|
1481 | 1481 | return [], [], [] |
|
1482 | 1482 | |
|
1483 | 1483 | modified = [] |
|
1484 | 1484 | deleted = [] |
|
1485 | 1485 | fixup = [] |
|
1486 | 1486 | pctx = self._parents[0] |
|
1487 | 1487 | # do a full compare of any files that might have changed |
|
1488 | 1488 | for f in sorted(files): |
|
1489 | 1489 | try: |
|
1490 | 1490 | # This will return True for a file that got replaced by a |
|
1491 | 1491 | # directory in the interim, but fixing that is pretty hard. |
|
1492 | 1492 | if (f not in pctx or self.flags(f) != pctx.flags(f) |
|
1493 | 1493 | or pctx[f].cmp(self[f])): |
|
1494 | 1494 | modified.append(f) |
|
1495 | 1495 | else: |
|
1496 | 1496 | fixup.append(f) |
|
1497 | 1497 | except (IOError, OSError): |
|
1498 | 1498 | # Did the file become inaccessible in between? Mark it as deleted,
|
1499 | 1499 | # matching dirstate behavior (issue5584). |
|
1500 | 1500 | # The dirstate has more complex behavior around whether a |
|
1501 | 1501 | # missing file matches a directory, etc, but we don't need to |
|
1502 | 1502 | # bother with that: if f has made it to this point, we're sure |
|
1503 | 1503 | # it's in the dirstate. |
|
1504 | 1504 | deleted.append(f) |
|
1505 | 1505 | |
|
1506 | 1506 | return modified, deleted, fixup |
|
1507 | 1507 | |
|
1508 | 1508 | def _poststatusfixup(self, status, fixup): |
|
1509 | 1509 | """update dirstate for files that are actually clean""" |
|
1510 | 1510 | poststatus = self._repo.postdsstatus() |
|
1511 | 1511 | if fixup or poststatus: |
|
1512 | 1512 | try: |
|
1513 | 1513 | oldid = self._repo.dirstate.identity() |
|
1514 | 1514 | |
|
1515 | 1515 | # updating the dirstate is optional |
|
1516 | 1516 | # so we don't wait on the lock |
|
1517 | 1517 | # wlock can invalidate the dirstate, so cache normal _after_ |
|
1518 | 1518 | # taking the lock |
|
1519 | 1519 | with self._repo.wlock(False): |
|
1520 | 1520 | if self._repo.dirstate.identity() == oldid: |
|
1521 | 1521 | if fixup: |
|
1522 | 1522 | normal = self._repo.dirstate.normal |
|
1523 | 1523 | for f in fixup: |
|
1524 | 1524 | normal(f) |
|
1525 | 1525 | # write changes out explicitly, because nesting |
|
1526 | 1526 | # wlock at runtime may prevent 'wlock.release()' |
|
1527 | 1527 | # after this block from doing so for subsequent |
|
1528 | 1528 | # changing files |
|
1529 | 1529 | tr = self._repo.currenttransaction() |
|
1530 | 1530 | self._repo.dirstate.write(tr) |
|
1531 | 1531 | |
|
1532 | 1532 | if poststatus: |
|
1533 | 1533 | for ps in poststatus: |
|
1534 | 1534 | ps(self, status) |
|
1535 | 1535 | else: |
|
1536 | 1536 | # in this case, writing changes out breaks |
|
1537 | 1537 | # consistency, because .hg/dirstate was |
|
1538 | 1538 | # already changed simultaneously after last |
|
1539 | 1539 | # caching (see also issue5584 for details)
|
1540 | 1540 | self._repo.ui.debug('skip updating dirstate: ' |
|
1541 | 1541 | 'identity mismatch\n') |
|
1542 | 1542 | except error.LockError: |
|
1543 | 1543 | pass |
|
1544 | 1544 | finally: |
|
1545 | 1545 | # Even if the wlock couldn't be grabbed, clear out the list. |
|
1546 | 1546 | self._repo.clearpostdsstatus() |
|
1547 | 1547 | |
|
1548 | 1548 | def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False): |
|
1549 | 1549 | '''Gets the status from the dirstate -- internal use only.''' |
|
1550 | 1550 | subrepos = [] |
|
1551 | 1551 | if '.hgsub' in self: |
|
1552 | 1552 | subrepos = sorted(self.substate) |
|
1553 | 1553 | cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored, |
|
1554 | 1554 | clean=clean, unknown=unknown) |
|
1555 | 1555 | |
|
1556 | 1556 | # check for any possibly clean files |
|
1557 | 1557 | fixup = [] |
|
1558 | 1558 | if cmp: |
|
1559 | 1559 | modified2, deleted2, fixup = self._checklookup(cmp) |
|
1560 | 1560 | s.modified.extend(modified2) |
|
1561 | 1561 | s.deleted.extend(deleted2) |
|
1562 | 1562 | |
|
1563 | 1563 | if fixup and clean: |
|
1564 | 1564 | s.clean.extend(fixup) |
|
1565 | 1565 | |
|
1566 | 1566 | self._poststatusfixup(s, fixup) |
|
1567 | 1567 | |
|
1568 | 1568 | if match.always(): |
|
1569 | 1569 | # cache for performance |
|
1570 | 1570 | if s.unknown or s.ignored or s.clean: |
|
1571 | 1571 | # "_status" is cached with list*=False in the normal route |
|
1572 | 1572 | self._status = scmutil.status(s.modified, s.added, s.removed, |
|
1573 | 1573 | s.deleted, [], [], []) |
|
1574 | 1574 | else: |
|
1575 | 1575 | self._status = s |
|
1576 | 1576 | |
|
1577 | 1577 | return s |
|
1578 | 1578 | |
|
1579 | 1579 | @propertycache |
|
1580 | 1580 | def _copies(self): |
|
1581 | 1581 | p1copies = {} |
|
1582 | 1582 | p2copies = {} |
|
1583 | 1583 | parents = self._repo.dirstate.parents() |
|
1584 | 1584 | p1manifest = self._repo[parents[0]].manifest() |
|
1585 | 1585 | p2manifest = self._repo[parents[1]].manifest() |
|
1586 | 1586 | narrowmatch = self._repo.narrowmatch() |
|
1587 | 1587 | for dst, src in self._repo.dirstate.copies().items(): |
|
1588 | 1588 | if not narrowmatch(dst): |
|
1589 | 1589 | continue |
|
1590 | 1590 | if src in p1manifest: |
|
1591 | 1591 | p1copies[dst] = src |
|
1592 | 1592 | elif src in p2manifest: |
|
1593 | 1593 | p2copies[dst] = src |
|
1594 | 1594 | return p1copies, p2copies |
|
1595 | 1595 | def p1copies(self): |
|
1596 | 1596 | return self._copies[0] |
|
1597 | 1597 | def p2copies(self): |
|
1598 | 1598 | return self._copies[1] |
|
1599 | 1599 | |
|
1600 | 1600 | @propertycache |
|
1601 | 1601 | def _manifest(self): |
|
1602 | 1602 | """generate a manifest corresponding to the values in self._status |
|
1603 | 1603 | |
|
1604 | 1604 | This reuses the file nodeids from the parent, but we use special node

1605 | 1605 | identifiers for added and modified files. This is used by manifest

1606 | 1606 | merge to see that files are different and by update logic to avoid
|
1607 | 1607 | deleting newly added files. |
|
1608 | 1608 | """ |
|
1609 | 1609 | return self._buildstatusmanifest(self._status) |
|
1610 | 1610 | |
|
1611 | 1611 | def _buildstatusmanifest(self, status): |
|
1612 | 1612 | """Builds a manifest that includes the given status results.""" |
|
1613 | 1613 | parents = self.parents() |
|
1614 | 1614 | |
|
1615 | 1615 | man = parents[0].manifest().copy() |
|
1616 | 1616 | |
|
1617 | 1617 | ff = self._flagfunc |
|
1618 | 1618 | for i, l in ((addednodeid, status.added), |
|
1619 | 1619 | (modifiednodeid, status.modified)): |
|
1620 | 1620 | for f in l: |
|
1621 | 1621 | man[f] = i |
|
1622 | 1622 | try: |
|
1623 | 1623 | man.setflag(f, ff(f)) |
|
1624 | 1624 | except OSError: |
|
1625 | 1625 | pass |
|
1626 | 1626 | |
|
1627 | 1627 | for f in status.deleted + status.removed: |
|
1628 | 1628 | if f in man: |
|
1629 | 1629 | del man[f] |
|
1630 | 1630 | |
|
1631 | 1631 | return man |
|
1632 | 1632 | |
|
1633 | 1633 | def _buildstatus(self, other, s, match, listignored, listclean, |
|
1634 | 1634 | listunknown): |
|
1635 | 1635 | """build a status with respect to another context |
|
1636 | 1636 | |
|
1637 | 1637 | This includes logic for maintaining the fast path of status when |
|
1638 | 1638 | comparing the working directory against its parent, which is to skip |
|
1639 | 1639 | building a new manifest if self (working directory) is not comparing |
|
1640 | 1640 | against its parent (repo['.']). |
|
1641 | 1641 | """ |
|
1642 | 1642 | s = self._dirstatestatus(match, listignored, listclean, listunknown) |
|
1643 | 1643 | # Filter out symlinks that, in the case of FAT32 and NTFS filesystems, |
|
1644 | 1644 | # might have accidentally ended up with the entire contents of the file |
|
1645 | 1645 | # they are supposed to be linking to. |
|
1646 | 1646 | s.modified[:] = self._filtersuspectsymlink(s.modified) |
|
1647 | 1647 | if other != self._repo['.']: |
|
1648 | 1648 | s = super(workingctx, self)._buildstatus(other, s, match, |
|
1649 | 1649 | listignored, listclean, |
|
1650 | 1650 | listunknown) |
|
1651 | 1651 | return s |
|
1652 | 1652 | |
|
1653 | 1653 | def _matchstatus(self, other, match): |
|
1654 | 1654 | """override the match method with a filter for directory patterns |
|
1655 | 1655 | |
|
1656 | 1656 | We use inheritance to customize the match.bad method only in cases of |
|
1657 | 1657 | workingctx since it belongs only to the working directory when |
|
1658 | 1658 | comparing against the parent changeset. |
|
1659 | 1659 | |
|
1660 | 1660 | If we aren't comparing against the working directory's parent, then we |
|
1661 | 1661 | just use the default match object sent to us. |
|
1662 | 1662 | """ |
|
1663 | 1663 | if other != self._repo['.']: |
|
1664 | 1664 | def bad(f, msg): |
|
1665 | 1665 | # 'f' may be a directory pattern from 'match.files()', |
|
1666 | 1666 | # so 'f not in ctx1' is not enough |
|
1667 | 1667 | if f not in other and not other.hasdir(f): |
|
1668 | 1668 | self._repo.ui.warn('%s: %s\n' % |
|
1669 | 1669 | (self._repo.dirstate.pathto(f), msg)) |
|
1670 | 1670 | match.bad = bad |
|
1671 | 1671 | return match |
|
1672 | 1672 | |
|
1673 | 1673 | def walk(self, match): |
|
1674 | 1674 | '''Generates matching file names.''' |
|
1675 | 1675 | return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match), |
|
1676 | 1676 | subrepos=sorted(self.substate), |
|
1677 | 1677 | unknown=True, ignored=False)) |
|
1678 | 1678 | |
|
1679 | 1679 | def matches(self, match): |
|
1680 | 1680 | match = self._repo.narrowmatch(match) |
|
1681 | 1681 | ds = self._repo.dirstate |
|
1682 | 1682 | return sorted(f for f in ds.matches(match) if ds[f] != 'r') |
|
1683 | 1683 | |
|
1684 | 1684 | def markcommitted(self, node): |
|
1685 | 1685 | with self._repo.dirstate.parentchange(): |
|
1686 | 1686 | for f in self.modified() + self.added(): |
|
1687 | 1687 | self._repo.dirstate.normal(f) |
|
1688 | 1688 | for f in self.removed(): |
|
1689 | 1689 | self._repo.dirstate.drop(f) |
|
1690 | 1690 | self._repo.dirstate.setparents(node) |
|
1691 | 1691 | |
|
1692 | 1692 | # write changes out explicitly, because nesting wlock at |
|
1693 | 1693 | # runtime may prevent 'wlock.release()' in 'repo.commit()' |
|
1694 | 1694 | # from immediately doing so for subsequent changing files |
|
1695 | 1695 | self._repo.dirstate.write(self._repo.currenttransaction()) |
|
1696 | 1696 | |
|
1697 | 1697 | sparse.aftercommit(self._repo, node) |
|
1698 | 1698 | |
|
1699 | 1699 | class committablefilectx(basefilectx): |
|
1700 | 1700 | """A committablefilectx provides common functionality for a file context |
|
1701 | 1701 | that wants the ability to commit, e.g. workingfilectx or memfilectx.""" |
|
1702 | 1702 | def __init__(self, repo, path, filelog=None, ctx=None): |
|
1703 | 1703 | self._repo = repo |
|
1704 | 1704 | self._path = path |
|
1705 | 1705 | self._changeid = None |
|
1706 | 1706 | self._filerev = self._filenode = None |
|
1707 | 1707 | |
|
1708 | 1708 | if filelog is not None: |
|
1709 | 1709 | self._filelog = filelog |
|
1710 | 1710 | if ctx: |
|
1711 | 1711 | self._changectx = ctx |
|
1712 | 1712 | |
|
1713 | 1713 | def __nonzero__(self): |
|
1714 | 1714 | return True |
|
1715 | 1715 | |
|
1716 | 1716 | __bool__ = __nonzero__ |
|
1717 | 1717 | |
|
1718 | 1718 | def linkrev(self): |
|
1719 | 1719 | # linked to self._changectx no matter if file is modified or not |
|
1720 | 1720 | return self.rev() |
|
1721 | 1721 | |
|
1722 | 1722 | def renamed(self): |
|
1723 | 1723 | path = self.copysource() |
|
1724 | 1724 | if not path: |
|
1725 | 1725 | return None |
|
1726 | 1726 | return path, self._changectx._parents[0]._manifest.get(path, nullid) |
|
1727 | 1727 | |
|
1728 | 1728 | def parents(self): |
|
1729 | 1729 | '''return parent filectxs, following copies if necessary''' |
|
1730 | 1730 | def filenode(ctx, path): |
|
1731 | 1731 | return ctx._manifest.get(path, nullid) |
|
1732 | 1732 | |
|
1733 | 1733 | path = self._path |
|
1734 | 1734 | fl = self._filelog |
|
1735 | 1735 | pcl = self._changectx._parents |
|
1736 | 1736 | renamed = self.renamed() |
|
1737 | 1737 | |
|
1738 | 1738 | if renamed: |
|
1739 | 1739 | pl = [renamed + (None,)] |
|
1740 | 1740 | else: |
|
1741 | 1741 | pl = [(path, filenode(pcl[0], path), fl)] |
|
1742 | 1742 | |
|
1743 | 1743 | for pc in pcl[1:]: |
|
1744 | 1744 | pl.append((path, filenode(pc, path), fl)) |
|
1745 | 1745 | |
|
1746 | 1746 | return [self._parentfilectx(p, fileid=n, filelog=l) |
|
1747 | 1747 | for p, n, l in pl if n != nullid] |
|
1748 | 1748 | |
|
1749 | 1749 | def children(self): |
|
1750 | 1750 | return [] |
|
1751 | 1751 | |
|
1752 | 1752 | class workingfilectx(committablefilectx): |
|
1753 | 1753 | """A workingfilectx object makes access to data related to a particular |
|
1754 | 1754 | file in the working directory convenient.""" |
|
1755 | 1755 | def __init__(self, repo, path, filelog=None, workingctx=None): |
|
1756 | 1756 | super(workingfilectx, self).__init__(repo, path, filelog, workingctx) |
|
1757 | 1757 | |
|
1758 | 1758 | @propertycache |
|
1759 | 1759 | def _changectx(self): |
|
1760 | 1760 | return workingctx(self._repo) |
|
1761 | 1761 | |
|
1762 | 1762 | def data(self): |
|
1763 | 1763 | return self._repo.wread(self._path) |
|
1764 | 1764 | def copysource(self): |
|
1765 | 1765 | return self._repo.dirstate.copied(self._path) |
|
1766 | 1766 | |
|
1767 | 1767 | def size(self): |
|
1768 | 1768 | return self._repo.wvfs.lstat(self._path).st_size |
|
1769 | def lstat(self): | |
|
1770 | return self._repo.wvfs.lstat(self._path) | |
|
1769 | 1771 | def date(self): |
|
1770 | 1772 | t, tz = self._changectx.date() |
|
1771 | 1773 | try: |
|
1772 | 1774 | return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz) |
|
1773 | 1775 | except OSError as err: |
|
1774 | 1776 | if err.errno != errno.ENOENT: |
|
1775 | 1777 | raise |
|
1776 | 1778 | return (t, tz) |
|
1777 | 1779 | |
|
1778 | 1780 | def exists(self): |
|
1779 | 1781 | return self._repo.wvfs.exists(self._path) |
|
1780 | 1782 | |
|
1781 | 1783 | def lexists(self): |
|
1782 | 1784 | return self._repo.wvfs.lexists(self._path) |
|
1783 | 1785 | |
|
1784 | 1786 | def audit(self): |
|
1785 | 1787 | return self._repo.wvfs.audit(self._path) |
|
1786 | 1788 | |
|
1787 | 1789 | def cmp(self, fctx): |
|
1788 | 1790 | """compare with other file context |
|
1789 | 1791 | |
|
1790 | 1792 | returns True if different than fctx. |
|
1791 | 1793 | """ |
|
1792 | 1794 | # fctx should be a filectx (not a workingfilectx) |
|
1793 | 1795 | # invert comparison to reuse the same code path |
|
1794 | 1796 | return fctx.cmp(self) |
|
1795 | 1797 | |
|
1796 | 1798 | def remove(self, ignoremissing=False): |
|
1797 | 1799 | """wraps unlink for a repo's working directory""" |
|
1798 | 1800 | rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs') |
|
1799 | 1801 | self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing, |
|
1800 | 1802 | rmdir=rmdir) |
|
1801 | 1803 | |
|
1802 | 1804 | def write(self, data, flags, backgroundclose=False, **kwargs): |
|
1803 | 1805 | """wraps repo.wwrite""" |
|
1804 | self._repo.wwrite(self._path, data, flags, | |
|
1806 | return self._repo.wwrite(self._path, data, flags, | |
|
1805 | 1807 | backgroundclose=backgroundclose, |
|
1806 | 1808 | **kwargs) |
|
1807 | 1809 | |
|
1808 | 1810 | def markcopied(self, src): |
|
1809 | 1811 | """marks this file a copy of `src`""" |
|
1810 | 1812 | self._repo.dirstate.copy(src, self._path) |
|
1811 | 1813 | |
|
1812 | 1814 | def clearunknown(self): |
|
1813 | 1815 | """Removes conflicting items in the working directory so that |
|
1814 | 1816 | ``write()`` can be called successfully. |
|
1815 | 1817 | """ |
|
1816 | 1818 | wvfs = self._repo.wvfs |
|
1817 | 1819 | f = self._path |
|
1818 | 1820 | wvfs.audit(f) |
|
1819 | 1821 | if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'): |
|
1820 | 1822 | # remove files under the directory as they should already be |
|
1821 | 1823 | # warned and backed up |
|
1822 | 1824 | if wvfs.isdir(f) and not wvfs.islink(f): |
|
1823 | 1825 | wvfs.rmtree(f, forcibly=True) |
|
1824 | 1826 | for p in reversed(list(util.finddirs(f))): |
|
1825 | 1827 | if wvfs.isfileorlink(p): |
|
1826 | 1828 | wvfs.unlink(p) |
|
1827 | 1829 | break |
|
1828 | 1830 | else: |
|
1829 | 1831 | # don't remove files if path conflicts are not processed |
|
1830 | 1832 | if wvfs.isdir(f) and not wvfs.islink(f): |
|
1831 | 1833 | wvfs.removedirs(f) |
|
1832 | 1834 | |
|
1833 | 1835 | def setflags(self, l, x): |
|
1834 | 1836 | self._repo.wvfs.setflags(self._path, l, x) |
|
1835 | 1837 | |
|
1836 | 1838 | class overlayworkingctx(committablectx): |
|
1837 | 1839 | """Wraps another mutable context with a write-back cache that can be |
|
1838 | 1840 | converted into a commit context. |
|
1839 | 1841 | |
|
1840 | 1842 | self._cache[path] maps to a dict with keys: { |
|
1841 | 1843 | 'exists': bool? |
|
1842 | 1844 | 'date': date? |
|
1843 | 1845 | 'data': str? |
|
1844 | 1846 | 'flags': str? |
|
1845 | 1847 | 'copied': str? (path or None) |
|
1846 | 1848 | } |
|
1847 | 1849 | If `exists` is True, `flags` must be non-None and 'date' is non-None. If it |
|
1848 | 1850 | is `False`, the file was deleted. |
|
1849 | 1851 | """ |
|
1850 | 1852 | |
|
1851 | 1853 | def __init__(self, repo): |
|
1852 | 1854 | super(overlayworkingctx, self).__init__(repo) |
|
1853 | 1855 | self.clean() |
|
1854 | 1856 | |
|
1855 | 1857 | def setbase(self, wrappedctx): |
|
1856 | 1858 | self._wrappedctx = wrappedctx |
|
1857 | 1859 | self._parents = [wrappedctx] |
|
1858 | 1860 | # Drop old manifest cache as it is now out of date. |
|
1859 | 1861 | # This is necessary when, e.g., rebasing several nodes with one |
|
1860 | 1862 | # ``overlayworkingctx`` (e.g. with --collapse). |
|
1861 | 1863 | util.clearcachedproperty(self, '_manifest') |
|
1862 | 1864 | |
|
1863 | 1865 | def data(self, path): |
|
1864 | 1866 | if self.isdirty(path): |
|
1865 | 1867 | if self._cache[path]['exists']: |
|
1866 | 1868 | if self._cache[path]['data'] is not None: |
|
1867 | 1869 | return self._cache[path]['data'] |
|
1868 | 1870 | else: |
|
1869 | 1871 | # Must fallback here, too, because we only set flags. |
|
1870 | 1872 | return self._wrappedctx[path].data() |
|
1871 | 1873 | else: |
|
1872 | 1874 | raise error.ProgrammingError("No such file or directory: %s" % |
|
1873 | 1875 | path) |
|
1874 | 1876 | else: |
|
1875 | 1877 | return self._wrappedctx[path].data() |
|
1876 | 1878 | |
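For orientation, a dirty cache entry of the shape the class docstring describes; `data()` above serves `cache['data']` when present and otherwise falls through to the wrapped base context (values hypothetical):

```python
cache_entry = {
    'exists': True,        # file present in the overlay
    'date': (0, 0),        # (unixtime, tz offset)
    'data': b'new bytes',  # None => only flags changed, read from base
    'flags': b'',          # '', 'x', or 'l'
    'copied': None,        # copy source path, or None
}
```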
|
1877 | 1879 | @propertycache |
|
1878 | 1880 | def _manifest(self): |
|
1879 | 1881 | parents = self.parents() |
|
1880 | 1882 | man = parents[0].manifest().copy() |
|
1881 | 1883 | |
|
1882 | 1884 | flag = self._flagfunc |
|
1883 | 1885 | for path in self.added(): |
|
1884 | 1886 | man[path] = addednodeid |
|
1885 | 1887 | man.setflag(path, flag(path)) |
|
1886 | 1888 | for path in self.modified(): |
|
1887 | 1889 | man[path] = modifiednodeid |
|
1888 | 1890 | man.setflag(path, flag(path)) |
|
1889 | 1891 | for path in self.removed(): |
|
1890 | 1892 | del man[path] |
|
1891 | 1893 | return man |
|
1892 | 1894 | |
|
1893 | 1895 | @propertycache |
|
1894 | 1896 | def _flagfunc(self): |
|
1895 | 1897 | def f(path): |
|
1896 | 1898 | return self._cache[path]['flags'] |
|
1897 | 1899 | return f |
|
1898 | 1900 | |
|
1899 | 1901 | def files(self): |
|
1900 | 1902 | return sorted(self.added() + self.modified() + self.removed()) |
|
1901 | 1903 | |
|
1902 | 1904 | def modified(self): |
|
1903 | 1905 | return [f for f in self._cache.keys() if self._cache[f]['exists'] and |
|
1904 | 1906 | self._existsinparent(f)] |
|
1905 | 1907 | |
|
1906 | 1908 | def added(self): |
|
1907 | 1909 | return [f for f in self._cache.keys() if self._cache[f]['exists'] and |
|
1908 | 1910 | not self._existsinparent(f)] |
|
1909 | 1911 | |
|
1910 | 1912 | def removed(self): |
|
1911 | 1913 | return [f for f in self._cache.keys() if |
|
1912 | 1914 | not self._cache[f]['exists'] and self._existsinparent(f)] |
|
1913 | 1915 | |
|
1914 | 1916 | def p1copies(self): |
|
1915 | 1917 | copies = self._repo._wrappedctx.p1copies().copy() |
|
1916 | 1918 | narrowmatch = self._repo.narrowmatch() |
|
1917 | 1919 | for f in self._cache.keys(): |
|
1918 | 1920 | if not narrowmatch(f): |
|
1919 | 1921 | continue |
|
1920 | 1922 | copies.pop(f, None) # delete if it exists |
|
1921 | 1923 | source = self._cache[f]['copied'] |
|
1922 | 1924 | if source: |
|
1923 | 1925 | copies[f] = source |
|
1924 | 1926 | return copies |
|
1925 | 1927 | |
|
1926 | 1928 | def p2copies(self): |
|
1927 | 1929 | copies = self._repo._wrappedctx.p2copies().copy() |
|
1928 | 1930 | narrowmatch = self._repo.narrowmatch() |
|
1929 | 1931 | for f in self._cache.keys(): |
|
1930 | 1932 | if not narrowmatch(f): |
|
1931 | 1933 | continue |
|
1932 | 1934 | copies.pop(f, None) # delete if it exists |
|
1933 | 1935 | source = self._cache[f]['copied'] |
|
1934 | 1936 | if source: |
|
1935 | 1937 | copies[f] = source |
|
1936 | 1938 | return copies |
|
1937 | 1939 | |
|
1938 | 1940 | def isinmemory(self): |
|
1939 | 1941 | return True |
|
1940 | 1942 | |
|
1941 | 1943 | def filedate(self, path): |
|
1942 | 1944 | if self.isdirty(path): |
|
1943 | 1945 | return self._cache[path]['date'] |
|
1944 | 1946 | else: |
|
1945 | 1947 | return self._wrappedctx[path].date() |
|
1946 | 1948 | |
|
1947 | 1949 | def markcopied(self, path, origin): |
|
1948 | 1950 | self._markdirty(path, exists=True, date=self.filedate(path), |
|
1949 | 1951 | flags=self.flags(path), copied=origin) |
|
1950 | 1952 | |
|
1951 | 1953 | def copydata(self, path): |
|
1952 | 1954 | if self.isdirty(path): |
|
1953 | 1955 | return self._cache[path]['copied'] |
|
1954 | 1956 | else: |
|
1955 | 1957 | return None |
|
1956 | 1958 | |
|
1957 | 1959 | def flags(self, path): |
|
1958 | 1960 | if self.isdirty(path): |
|
1959 | 1961 | if self._cache[path]['exists']: |
|
1960 | 1962 | return self._cache[path]['flags'] |
|
1961 | 1963 | else: |
|
1962 | 1964 | raise error.ProgrammingError("No such file or directory: %s" % |
|
1963 | 1965 | self._path) |
|
1964 | 1966 | else: |
|
1965 | 1967 | return self._wrappedctx[path].flags() |
|
1966 | 1968 | |
|
1967 | 1969 | def __contains__(self, key): |
|
1968 | 1970 | if key in self._cache: |
|
1969 | 1971 | return self._cache[key]['exists'] |
|
1970 | 1972 | return key in self.p1() |
|
1971 | 1973 | |
|
1972 | 1974 | def _existsinparent(self, path): |
|
1973 | 1975 | try: |
|
1974 | 1976 | # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
|
1975 | 1977 | # exist, unlike ``workingctx``, which returns a ``workingfilectx`` |
|
1976 | 1978 | # with an ``exists()`` function. |
|
1977 | 1979 | self._wrappedctx[path] |
|
1978 | 1980 | return True |
|
1979 | 1981 | except error.ManifestLookupError: |
|
1980 | 1982 | return False |
|
1981 | 1983 | |
|
1982 | 1984 | def _auditconflicts(self, path): |
|
1983 | 1985 | """Replicates conflict checks done by wvfs.write(). |
|
1984 | 1986 | |
|
1985 | 1987 | Since we never write to the filesystem and never call `applyupdates` in |
|
1986 | 1988 | IMM, we'll never check that a path is actually writable -- e.g., because |
|
1987 | 1989 | it adds `a/foo`, but `a` is actually a file in the other commit. |
|
1988 | 1990 | """ |
|
1989 | 1991 | def fail(path, component): |
|
1990 | 1992 | # p1() is the base and we're receiving "writes" for p2()'s |
|
1991 | 1993 | # files. |
|
1992 | 1994 | if 'l' in self.p1()[component].flags(): |
|
1993 | 1995 | raise error.Abort("error: %s conflicts with symlink %s " |
|
1994 | 1996 | "in %d." % (path, component, |
|
1995 | 1997 | self.p1().rev())) |
|
1996 | 1998 | else: |
|
1997 | 1999 | raise error.Abort("error: '%s' conflicts with file '%s' in " |
|
1998 | 2000 | "%d." % (path, component, |
|
1999 | 2001 | self.p1().rev())) |
|
2000 | 2002 | |
|
2001 | 2003 | # Test that each new directory to be created to write this path from p2 |
|
2002 | 2004 | # is not a file in p1. |
|
2003 | 2005 | components = path.split('/') |
|
2004 | 2006 | for i in pycompat.xrange(len(components)): |
|
2005 | 2007 | component = "/".join(components[0:i]) |
|
2006 | 2008 | if component in self: |
|
2007 | 2009 | fail(path, component) |
|
2008 | 2010 | |
|
2009 | 2011 | # Test the other direction -- that this path from p2 isn't a directory |
|
2010 | 2012 | # in p1 (test that p1 doesn't have any paths matching `path/*`). |
|
2011 | 2013 | match = self.match([path], default=b'path') |
|
2012 | 2014 | matches = self.p1().manifest().matches(match) |
|
2013 | 2015 | mfiles = matches.keys() |
|
2014 | 2016 | if len(mfiles) > 0: |
|
2015 | 2017 | if len(mfiles) == 1 and mfiles[0] == path: |
|
2016 | 2018 | return |
|
2017 | 2019 | # omit the files which are deleted in current IMM wctx |
|
2018 | 2020 | mfiles = [m for m in mfiles if m in self] |
|
2019 | 2021 | if not mfiles: |
|
2020 | 2022 | return |
|
2021 | 2023 | raise error.Abort("error: file '%s' cannot be written because " |
|
2022 | 2024 | " '%s/' is a directory in %s (containing %d " |
|
2023 | 2025 | "entries: %s)" |
|
2024 | 2026 | % (path, path, self.p1(), len(mfiles), |
|
2025 | 2027 | ', '.join(mfiles))) |
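
The first loop above tests every proper prefix of ``path`` against the base commit; a standalone sketch of that check (hypothetical data, not the real manifest API):

    # Standalone sketch: flag a write of 'a/b/c' when 'a' or 'a/b' is a file.
    def prefixcomponents(path):
        parts = path.split('/')
        return ['/'.join(parts[:i]) for i in range(1, len(parts))]

    p1files = {'a/b'}   # pretend 'a/b' is a *file* in p1
    conflicts = [c for c in prefixcomponents('a/b/c') if c in p1files]
    assert conflicts == ['a/b']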
|
2026 | 2028 | |
|
2027 | 2029 | def write(self, path, data, flags='', **kwargs): |
|
2028 | 2030 | if data is None: |
|
2029 | 2031 | raise error.ProgrammingError("data must be non-None") |
|
2030 | 2032 | self._auditconflicts(path) |
|
2031 | 2033 | self._markdirty(path, exists=True, data=data, date=dateutil.makedate(), |
|
2032 | 2034 | flags=flags) |
|
2033 | 2035 | |
|
2034 | 2036 | def setflags(self, path, l, x): |
|
2035 | 2037 | flag = '' |
|
2036 | 2038 | if l: |
|
2037 | 2039 | flag = 'l' |
|
2038 | 2040 | elif x: |
|
2039 | 2041 | flag = 'x' |
|
2040 | 2042 | self._markdirty(path, exists=True, date=dateutil.makedate(), |
|
2041 | 2043 | flags=flag) |
|
2042 | 2044 | |
|
2043 | 2045 | def remove(self, path): |
|
2044 | 2046 | self._markdirty(path, exists=False) |
|
2045 | 2047 | |
|
2046 | 2048 | def exists(self, path): |
|
2047 | 2049 | """exists behaves like `lexists`, but needs to follow symlinks and |
|
2048 | 2050 | return False if they are broken. |
|
2049 | 2051 | """ |
|
2050 | 2052 | if self.isdirty(path): |
|
2051 | 2053 | # If this path exists and is a symlink, "follow" it by calling |
|
2052 | 2054 | # exists on the destination path. |
|
2053 | 2055 | if (self._cache[path]['exists'] and |
|
2054 | 2056 | 'l' in self._cache[path]['flags']): |
|
2055 | 2057 | return self.exists(self._cache[path]['data'].strip()) |
|
2056 | 2058 | else: |
|
2057 | 2059 | return self._cache[path]['exists'] |
|
2058 | 2060 | |
|
2059 | 2061 | return self._existsinparent(path) |
|
2060 | 2062 | |
|
2061 | 2063 | def lexists(self, path): |
|
2062 | 2064 | """lexists returns True if the path exists""" |
|
2063 | 2065 | if self.isdirty(path): |
|
2064 | 2066 | return self._cache[path]['exists'] |
|
2065 | 2067 | |
|
2066 | 2068 | return self._existsinparent(path) |
|
2067 | 2069 | |
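
The ``exists``/``lexists`` split mirrors ``os.path``; a standalone, POSIX-only demonstration with a dangling symlink:

    import os
    import tempfile

    d = tempfile.mkdtemp()
    link = os.path.join(d, 'dangling')
    os.symlink(os.path.join(d, 'missing'), link)   # target never created
    assert os.path.lexists(link) and not os.path.exists(link)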
|
2068 | 2070 | def size(self, path): |
|
2069 | 2071 | if self.isdirty(path): |
|
2070 | 2072 | if self._cache[path]['exists']: |
|
2071 | 2073 | return len(self._cache[path]['data']) |
|
2072 | 2074 | else: |
|
2073 | 2075 | raise error.ProgrammingError("No such file or directory: %s" % |
|
2074 | 2076 | self._path) |
|
2075 | 2077 | return self._wrappedctx[path].size() |
|
2076 | 2078 | |
|
2077 | 2079 | def tomemctx(self, text, branch=None, extra=None, date=None, parents=None, |
|
2078 | 2080 | user=None, editor=None): |
|
2079 | 2081 | """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be |
|
2080 | 2082 | committed. |
|
2081 | 2083 | |
|
2082 | 2084 | ``text`` is the commit message. |
|
2083 | 2085 | ``parents`` (optional) are rev numbers. |
|
2084 | 2086 | """ |
|
2085 | 2087 | # Default parents to the wrapped contexts' if not passed. |
|
2086 | 2088 | if parents is None: |
|
2087 | 2089 | parents = self._wrappedctx.parents() |
|
2088 | 2090 | if len(parents) == 1: |
|
2089 | 2091 | parents = (parents[0], None) |
|
2090 | 2092 | |
|
2091 | 2093 | # ``parents`` is passed as rev numbers; convert to ``commitctxs``. |
|
2092 | 2094 | if parents[1] is None: |
|
2093 | 2095 | parents = (self._repo[parents[0]], None) |
|
2094 | 2096 | else: |
|
2095 | 2097 | parents = (self._repo[parents[0]], self._repo[parents[1]]) |
|
2096 | 2098 | |
|
2097 | 2099 | files = self.files() |
|
2098 | 2100 | def getfile(repo, memctx, path): |
|
2099 | 2101 | if self._cache[path]['exists']: |
|
2100 | 2102 | return memfilectx(repo, memctx, path, |
|
2101 | 2103 | self._cache[path]['data'], |
|
2102 | 2104 | 'l' in self._cache[path]['flags'], |
|
2103 | 2105 | 'x' in self._cache[path]['flags'], |
|
2104 | 2106 | self._cache[path]['copied']) |
|
2105 | 2107 | else: |
|
2106 | 2108 | # Returning None, but including the path in `files`, is |
|
2107 | 2109 | # necessary for memctx to register a deletion. |
|
2108 | 2110 | return None |
|
2109 | 2111 | return memctx(self._repo, parents, text, files, getfile, date=date, |
|
2110 | 2112 | extra=extra, user=user, branch=branch, editor=editor) |
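
A hedged sketch of the intended flow, assuming a rebase-like caller (all names here are hypothetical and shown commented out, since they need a live repo):

    # wctx = overlayworkingctx(repo)
    # wctx.setbase(repo[destrev])
    # ... replay file writes/removes into wctx ...
    # mctx = wctx.tomemctx(b'collapsed commit', user=b'someone')
    # node = mctx.commit()   # same as repo.commitctx(mctx)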
|
2111 | 2113 | |
|
2112 | 2114 | def isdirty(self, path): |
|
2113 | 2115 | return path in self._cache |
|
2114 | 2116 | |
|
2115 | 2117 | def isempty(self): |
|
2116 | 2118 | # We need to discard any keys that are actually clean before the empty |
|
2117 | 2119 | # commit check. |
|
2118 | 2120 | self._compact() |
|
2119 | 2121 | return len(self._cache) == 0 |
|
2120 | 2122 | |
|
2121 | 2123 | def clean(self): |
|
2122 | 2124 | self._cache = {} |
|
2123 | 2125 | |
|
2124 | 2126 | def _compact(self): |
|
2125 | 2127 | """Removes keys from the cache that are actually clean, by comparing |
|
2126 | 2128 | them with the underlying context. |
|
2127 | 2129 | |
|
2128 | 2130 | This can occur during the merge process, e.g. by passing --tool :local |
|
2129 | 2131 | to resolve a conflict. |
|
2130 | 2132 | """ |
|
2131 | 2133 | keys = [] |
|
2132 | 2134 | # This won't be perfect, but can help performance significantly when |
|
2133 | 2135 | # using things like remotefilelog. |
|
2134 | 2136 | scmutil.prefetchfiles( |
|
2135 | 2137 | self.repo(), [self.p1().rev()], |
|
2136 | 2138 | scmutil.matchfiles(self.repo(), self._cache.keys())) |
|
2137 | 2139 | |
|
2138 | 2140 | for path in self._cache.keys(): |
|
2139 | 2141 | cache = self._cache[path] |
|
2140 | 2142 | try: |
|
2141 | 2143 | underlying = self._wrappedctx[path] |
|
2142 | 2144 | if (underlying.data() == cache['data'] and |
|
2143 | 2145 | underlying.flags() == cache['flags']): |
|
2144 | 2146 | keys.append(path) |
|
2145 | 2147 | except error.ManifestLookupError: |
|
2146 | 2148 | # Path not in the underlying manifest (created). |
|
2147 | 2149 | continue |
|
2148 | 2150 | |
|
2149 | 2151 | for path in keys: |
|
2150 | 2152 | del self._cache[path] |
|
2151 | 2153 | return keys |
|
2152 | 2154 | |
|
2153 | 2155 | def _markdirty(self, path, exists, data=None, date=None, flags='', |
|
2154 | 2156 | copied=None): |
|
2155 | 2157 | # data not provided, let's see if we already have some; if not, let's |
|
2156 | 2158 | # grab it from our underlying context, so that we always have data if |
|
2157 | 2159 | # the file is marked as existing. |
|
2158 | 2160 | if exists and data is None: |
|
2159 | 2161 | oldentry = self._cache.get(path) or {} |
|
2160 | 2162 | data = oldentry.get('data') or self._wrappedctx[path].data() |
|
2161 | 2163 | |
|
2162 | 2164 | self._cache[path] = { |
|
2163 | 2165 | 'exists': exists, |
|
2164 | 2166 | 'data': data, |
|
2165 | 2167 | 'date': date, |
|
2166 | 2168 | 'flags': flags, |
|
2167 | 2169 | 'copied': copied, |
|
2168 | 2170 | } |
|
2169 | 2171 | |
|
2170 | 2172 | def filectx(self, path, filelog=None): |
|
2171 | 2173 | return overlayworkingfilectx(self._repo, path, parent=self, |
|
2172 | 2174 | filelog=filelog) |
|
2173 | 2175 | |
|
2174 | 2176 | class overlayworkingfilectx(committablefilectx): |
|
2175 | 2177 | """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory |
|
2176 | 2178 | cache, which can be flushed through later by calling ``flush()``.""" |
|
2177 | 2179 | |
|
2178 | 2180 | def __init__(self, repo, path, filelog=None, parent=None): |
|
2179 | 2181 | super(overlayworkingfilectx, self).__init__(repo, path, filelog, |
|
2180 | 2182 | parent) |
|
2181 | 2183 | self._repo = repo |
|
2182 | 2184 | self._parent = parent |
|
2183 | 2185 | self._path = path |
|
2184 | 2186 | |
|
2185 | 2187 | def cmp(self, fctx): |
|
2186 | 2188 | return self.data() != fctx.data() |
|
2187 | 2189 | |
|
2188 | 2190 | def changectx(self): |
|
2189 | 2191 | return self._parent |
|
2190 | 2192 | |
|
2191 | 2193 | def data(self): |
|
2192 | 2194 | return self._parent.data(self._path) |
|
2193 | 2195 | |
|
2194 | 2196 | def date(self): |
|
2195 | 2197 | return self._parent.filedate(self._path) |
|
2196 | 2198 | |
|
2197 | 2199 | def exists(self): |
|
2198 | 2200 | return self.lexists() |
|
2199 | 2201 | |
|
2200 | 2202 | def lexists(self): |
|
2201 | 2203 | return self._parent.exists(self._path) |
|
2202 | 2204 | |
|
2203 | 2205 | def copysource(self): |
|
2204 | 2206 | return self._parent.copydata(self._path) |
|
2205 | 2207 | |
|
2206 | 2208 | def size(self): |
|
2207 | 2209 | return self._parent.size(self._path) |
|
2208 | 2210 | |
|
2209 | 2211 | def markcopied(self, origin): |
|
2210 | 2212 | self._parent.markcopied(self._path, origin) |
|
2211 | 2213 | |
|
2212 | 2214 | def audit(self): |
|
2213 | 2215 | pass |
|
2214 | 2216 | |
|
2215 | 2217 | def flags(self): |
|
2216 | 2218 | return self._parent.flags(self._path) |
|
2217 | 2219 | |
|
2218 | 2220 | def setflags(self, islink, isexec): |
|
2219 | 2221 | return self._parent.setflags(self._path, islink, isexec) |
|
2220 | 2222 | |
|
2221 | 2223 | def write(self, data, flags, backgroundclose=False, **kwargs): |
|
2222 | 2224 | return self._parent.write(self._path, data, flags, **kwargs) |
|
2223 | 2225 | |
|
2224 | 2226 | def remove(self, ignoremissing=False): |
|
2225 | 2227 | return self._parent.remove(self._path) |
|
2226 | 2228 | |
|
2227 | 2229 | def clearunknown(self): |
|
2228 | 2230 | pass |
|
2229 | 2231 | |
|
2230 | 2232 | class workingcommitctx(workingctx): |
|
2231 | 2233 | """A workingcommitctx object makes access to data related to |
|
2232 | 2234 | the revision being committed convenient. |
|
2233 | 2235 | |
|
2234 | 2236 | This hides changes in the working directory, if they aren't |
|
2235 | 2237 | committed in this context. |
|
2236 | 2238 | """ |
|
2237 | 2239 | def __init__(self, repo, changes, |
|
2238 | 2240 | text="", user=None, date=None, extra=None): |
|
2239 | 2241 | super(workingcommitctx, self).__init__(repo, text, user, date, extra, |
|
2240 | 2242 | changes) |
|
2241 | 2243 | |
|
2242 | 2244 | def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False): |
|
2243 | 2245 | """Return matched files only in ``self._status`` |
|
2244 | 2246 | |
|
2245 | 2247 | Uncommitted files appear "clean" via this context, even if |
|
2246 | 2248 | they aren't actually so in the working directory. |
|
2247 | 2249 | """ |
|
2248 | 2250 | if clean: |
|
2249 | 2251 | clean = [f for f in self._manifest if f not in self._changedset] |
|
2250 | 2252 | else: |
|
2251 | 2253 | clean = [] |
|
2252 | 2254 | return scmutil.status([f for f in self._status.modified if match(f)], |
|
2253 | 2255 | [f for f in self._status.added if match(f)], |
|
2254 | 2256 | [f for f in self._status.removed if match(f)], |
|
2255 | 2257 | [], [], [], clean) |
|
2256 | 2258 | |
|
2257 | 2259 | @propertycache |
|
2258 | 2260 | def _changedset(self): |
|
2259 | 2261 | """Return the set of files changed in this context |
|
2260 | 2262 | """ |
|
2261 | 2263 | changed = set(self._status.modified) |
|
2262 | 2264 | changed.update(self._status.added) |
|
2263 | 2265 | changed.update(self._status.removed) |
|
2264 | 2266 | return changed |
|
2265 | 2267 | |
|
2266 | 2268 | def makecachingfilectxfn(func): |
|
2267 | 2269 | """Create a filectxfn that caches based on the path. |
|
2268 | 2270 | |
|
2269 | 2271 | We can't use util.cachefunc because it uses all arguments as the cache |
|
2270 | 2272 | key and this creates a cycle since the arguments include the repo and |
|
2271 | 2273 | memctx. |
|
2272 | 2274 | """ |
|
2273 | 2275 | cache = {} |
|
2274 | 2276 | |
|
2275 | 2277 | def getfilectx(repo, memctx, path): |
|
2276 | 2278 | if path not in cache: |
|
2277 | 2279 | cache[path] = func(repo, memctx, path) |
|
2278 | 2280 | return cache[path] |
|
2279 | 2281 | |
|
2280 | 2282 | return getfilectx |
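
A standalone analogue of this path-keyed memoization; caching on all arguments would pin ``repo`` and ``memctx`` inside the cache and create the cycle the docstring mentions:

    # Memoize on `path` only, ignoring the repo/memctx arguments.
    def makecaching(func):
        cache = {}
        def wrapped(repo, memctx, path):
            if path not in cache:
                cache[path] = func(repo, memctx, path)
            return cache[path]
        return wrapped

    calls = []
    getter = makecaching(lambda r, m, p: calls.append(p) or p.upper())
    assert getter(None, None, 'a') == 'A'
    assert getter(None, None, 'a') == 'A'
    assert calls == ['a']   # the second lookup was served from the cache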
|
2281 | 2283 | |
|
2282 | 2284 | def memfilefromctx(ctx): |
|
2283 | 2285 | """Given a context return a memfilectx for ctx[path] |
|
2284 | 2286 | |
|
2285 | 2287 | This is a convenience method for building a memctx based on another |
|
2286 | 2288 | context. |
|
2287 | 2289 | """ |
|
2288 | 2290 | def getfilectx(repo, memctx, path): |
|
2289 | 2291 | fctx = ctx[path] |
|
2290 | 2292 | copysource = fctx.copysource() |
|
2291 | 2293 | return memfilectx(repo, memctx, path, fctx.data(), |
|
2292 | 2294 | islink=fctx.islink(), isexec=fctx.isexec(), |
|
2293 | 2295 | copysource=copysource) |
|
2294 | 2296 | |
|
2295 | 2297 | return getfilectx |
|
2296 | 2298 | |
|
2297 | 2299 | def memfilefrompatch(patchstore): |
|
2298 | 2300 | """Given a patch (e.g. patchstore object) return a memfilectx |
|
2299 | 2301 | |
|
2300 | 2302 | This is a convenience method for building a memctx based on a patchstore. |
|
2301 | 2303 | """ |
|
2302 | 2304 | def getfilectx(repo, memctx, path): |
|
2303 | 2305 | data, mode, copysource = patchstore.getfile(path) |
|
2304 | 2306 | if data is None: |
|
2305 | 2307 | return None |
|
2306 | 2308 | islink, isexec = mode |
|
2307 | 2309 | return memfilectx(repo, memctx, path, data, islink=islink, |
|
2308 | 2310 | isexec=isexec, copysource=copysource) |
|
2309 | 2311 | |
|
2310 | 2312 | return getfilectx |
|
2311 | 2313 | |
|
2312 | 2314 | class memctx(committablectx): |
|
2313 | 2315 | """Use memctx to perform in-memory commits via localrepo.commitctx(). |
|
2314 | 2316 | |
|
2315 | 2317 | Revision information is supplied at initialization time, while

2316 | 2318 | related file data is made available through a callback
|
2317 | 2319 | mechanism. 'repo' is the current localrepo, 'parents' is a |
|
2318 | 2320 | sequence of two parent revisions identifiers (pass None for every |
|
2319 | 2321 | missing parent), 'text' is the commit message and 'files' lists |
|
2320 | 2322 | names of files touched by the revision (normalized and relative to |
|
2321 | 2323 | repository root). |
|
2322 | 2324 | |
|
2323 | 2325 | filectxfn(repo, memctx, path) is a callable receiving the |
|
2324 | 2326 | repository, the current memctx object and the normalized path of |
|
2325 | 2327 | requested file, relative to repository root. It is fired by the |
|
2326 | 2328 | commit function for every file in 'files', but calls order is |
|
2327 | 2329 | undefined. If the file is available in the revision being |
|
2328 | 2330 | committed (updated or added), filectxfn returns a memfilectx |
|
2329 | 2331 | object. If the file was removed, filectxfn returns None for recent
|
2330 | 2332 | Mercurial. Moved files are represented by marking the source file |
|
2331 | 2333 | removed and the new file added with copy information (see |
|
2332 | 2334 | memfilectx). |
|
2333 | 2335 | |
|
2334 | 2336 | user receives the committer name and defaults to current |
|
2335 | 2337 | repository username, date is the commit date in any format |
|
2336 | 2338 | supported by dateutil.parsedate() and defaults to current date, extra |
|
2337 | 2339 | is a dictionary of metadata or is left empty. |
|
2338 | 2340 | """ |
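
A minimal sketch of the ``filectxfn`` protocol just described (hypothetical repo and parent node, shown commented out since it needs a live repo):

    # def filectxfn(repo, memctx, path):
    #     if path == b'deleted.txt':
    #         return None   # None registers a deletion in recent Mercurial
    #     return memfilectx(repo, memctx, path, b'contents\n',
    #                       islink=False, isexec=False, copysource=None)
    #
    # ctx = memctx(repo, (p1node, None), b'message',
    #              [b'added.txt', b'deleted.txt'], filectxfn)
    # ctx.commit()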
|
2339 | 2341 | |
|
2340 | 2342 | # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files. |
|
2341 | 2343 | # Extensions that need to retain compatibility across Mercurial 3.1 can use |
|
2342 | 2344 | # this field to determine what to do in filectxfn. |
|
2343 | 2345 | _returnnoneformissingfiles = True |
|
2344 | 2346 | |
|
2345 | 2347 | def __init__(self, repo, parents, text, files, filectxfn, user=None, |
|
2346 | 2348 | date=None, extra=None, branch=None, editor=False): |
|
2347 | 2349 | super(memctx, self).__init__(repo, text, user, date, extra, |
|
2348 | 2350 | branch=branch) |
|
2349 | 2351 | self._rev = None |
|
2350 | 2352 | self._node = None |
|
2351 | 2353 | parents = [(p or nullid) for p in parents] |
|
2352 | 2354 | p1, p2 = parents |
|
2353 | 2355 | self._parents = [self._repo[p] for p in (p1, p2)] |
|
2354 | 2356 | files = sorted(set(files)) |
|
2355 | 2357 | self._files = files |
|
2356 | 2358 | self.substate = {} |
|
2357 | 2359 | |
|
2358 | 2360 | if isinstance(filectxfn, patch.filestore): |
|
2359 | 2361 | filectxfn = memfilefrompatch(filectxfn) |
|
2360 | 2362 | elif not callable(filectxfn): |
|
2361 | 2363 | # if store is not callable, wrap it in a function |
|
2362 | 2364 | filectxfn = memfilefromctx(filectxfn) |
|
2363 | 2365 | |
|
2364 | 2366 | # memoizing increases performance for e.g. vcs convert scenarios. |
|
2365 | 2367 | self._filectxfn = makecachingfilectxfn(filectxfn) |
|
2366 | 2368 | |
|
2367 | 2369 | if editor: |
|
2368 | 2370 | self._text = editor(self._repo, self, []) |
|
2369 | 2371 | self._repo.savecommitmessage(self._text) |
|
2370 | 2372 | |
|
2371 | 2373 | def filectx(self, path, filelog=None): |
|
2372 | 2374 | """get a file context from the working directory |
|
2373 | 2375 | |
|
2374 | 2376 | Returns None if file doesn't exist and should be removed.""" |
|
2375 | 2377 | return self._filectxfn(self._repo, self, path) |
|
2376 | 2378 | |
|
2377 | 2379 | def commit(self): |
|
2378 | 2380 | """commit context to the repo""" |
|
2379 | 2381 | return self._repo.commitctx(self) |
|
2380 | 2382 | |
|
2381 | 2383 | @propertycache |
|
2382 | 2384 | def _manifest(self): |
|
2383 | 2385 | """generate a manifest based on the return values of filectxfn""" |
|
2384 | 2386 | |
|
2385 | 2387 | # keep this simple for now; just worry about p1 |
|
2386 | 2388 | pctx = self._parents[0] |
|
2387 | 2389 | man = pctx.manifest().copy() |
|
2388 | 2390 | |
|
2389 | 2391 | for f in self._status.modified: |
|
2390 | 2392 | man[f] = modifiednodeid |
|
2391 | 2393 | |
|
2392 | 2394 | for f in self._status.added: |
|
2393 | 2395 | man[f] = addednodeid |
|
2394 | 2396 | |
|
2395 | 2397 | for f in self._status.removed: |
|
2396 | 2398 | if f in man: |
|
2397 | 2399 | del man[f] |
|
2398 | 2400 | |
|
2399 | 2401 | return man |
|
2400 | 2402 | |
|
2401 | 2403 | @propertycache |
|
2402 | 2404 | def _status(self): |
|
2403 | 2405 | """Calculate exact status from ``files`` specified at construction |
|
2404 | 2406 | """ |
|
2405 | 2407 | man1 = self.p1().manifest() |
|
2406 | 2408 | p2 = self._parents[1] |
|
2407 | 2409 | # "1 < len(self._parents)" can't be used for checking |
|
2408 | 2410 | # existence of the 2nd parent, because "memctx._parents" is |
|
2409 | 2411 | # explicitly initialized with a list of length 2.
|
2410 | 2412 | if p2.node() != nullid: |
|
2411 | 2413 | man2 = p2.manifest() |
|
2412 | 2414 | managing = lambda f: f in man1 or f in man2 |
|
2413 | 2415 | else: |
|
2414 | 2416 | managing = lambda f: f in man1 |
|
2415 | 2417 | |
|
2416 | 2418 | modified, added, removed = [], [], [] |
|
2417 | 2419 | for f in self._files: |
|
2418 | 2420 | if not managing(f): |
|
2419 | 2421 | added.append(f) |
|
2420 | 2422 | elif self[f]: |
|
2421 | 2423 | modified.append(f) |
|
2422 | 2424 | else: |
|
2423 | 2425 | removed.append(f) |
|
2424 | 2426 | |
|
2425 | 2427 | return scmutil.status(modified, added, removed, [], [], [], []) |
|
2426 | 2428 | |
|
2427 | 2429 | class memfilectx(committablefilectx): |
|
2428 | 2430 | """memfilectx represents an in-memory file to commit. |
|
2429 | 2431 | |
|
2430 | 2432 | See memctx and committablefilectx for more details. |
|
2431 | 2433 | """ |
|
2432 | 2434 | def __init__(self, repo, changectx, path, data, islink=False, |
|
2433 | 2435 | isexec=False, copysource=None): |
|
2434 | 2436 | """ |
|
2435 | 2437 | path is the normalized file path relative to repository root. |
|
2436 | 2438 | data is the file content as a string. |
|
2437 | 2439 | islink is True if the file is a symbolic link. |
|
2438 | 2440 | isexec is True if the file is executable. |
|
2439 | 2441 | copied is the source file path if current file was copied in the |
|
2440 | 2442 | revision being committed, or None.""" |
|
2441 | 2443 | super(memfilectx, self).__init__(repo, path, None, changectx) |
|
2442 | 2444 | self._data = data |
|
2443 | 2445 | if islink: |
|
2444 | 2446 | self._flags = 'l' |
|
2445 | 2447 | elif isexec: |
|
2446 | 2448 | self._flags = 'x' |
|
2447 | 2449 | else: |
|
2448 | 2450 | self._flags = '' |
|
2449 | 2451 | self._copysource = copysource |
|
2450 | 2452 | |
|
2451 | 2453 | def copysource(self): |
|
2452 | 2454 | return self._copysource |
|
2453 | 2455 | |
|
2454 | 2456 | def cmp(self, fctx): |
|
2455 | 2457 | return self.data() != fctx.data() |
|
2456 | 2458 | |
|
2457 | 2459 | def data(self): |
|
2458 | 2460 | return self._data |
|
2459 | 2461 | |
|
2460 | 2462 | def remove(self, ignoremissing=False): |
|
2461 | 2463 | """wraps unlink for a repo's working directory""" |
|
2462 | 2464 | # need to figure out what to do here |
|
2463 | 2465 | del self._changectx[self._path] |
|
2464 | 2466 | |
|
2465 | 2467 | def write(self, data, flags, **kwargs): |
|
2466 | 2468 | """wraps repo.wwrite""" |
|
2467 | 2469 | self._data = data |
|
2468 | 2470 | |
|
2469 | 2471 | |
|
2470 | 2472 | class metadataonlyctx(committablectx): |
|
2471 | 2473 | """Like memctx but it's reusing the manifest of different commit. |
|
2472 | 2474 | Intended to be used by lightweight operations that are creating |
|
2473 | 2475 | metadata-only changes. |
|
2474 | 2476 | |
|
2475 | 2477 | Revision information is supplied at initialization time. 'repo' is the |
|
2476 | 2478 | current localrepo, 'ctx' is the original revision whose manifest we're reusing,
|
2477 | 2479 | 'parents' is a sequence of two parent revisions identifiers (pass None for |
|
2478 | 2480 | every missing parent), 'text' is the commit message.
|
2479 | 2481 | |
|
2480 | 2482 | user receives the committer name and defaults to current repository |
|
2481 | 2483 | username, date is the commit date in any format supported by |
|
2482 | 2484 | dateutil.parsedate() and defaults to current date, extra is a dictionary of |
|
2483 | 2485 | metadata or is left empty. |
|
2484 | 2486 | """ |
|
2485 | 2487 | def __init__(self, repo, originalctx, parents=None, text=None, user=None, |
|
2486 | 2488 | date=None, extra=None, editor=False): |
|
2487 | 2489 | if text is None: |
|
2488 | 2490 | text = originalctx.description() |
|
2489 | 2491 | super(metadataonlyctx, self).__init__(repo, text, user, date, extra) |
|
2490 | 2492 | self._rev = None |
|
2491 | 2493 | self._node = None |
|
2492 | 2494 | self._originalctx = originalctx |
|
2493 | 2495 | self._manifestnode = originalctx.manifestnode() |
|
2494 | 2496 | if parents is None: |
|
2495 | 2497 | parents = originalctx.parents() |
|
2496 | 2498 | else: |
|
2497 | 2499 | parents = [repo[p] for p in parents if p is not None] |
|
2498 | 2500 | parents = parents[:] |
|
2499 | 2501 | while len(parents) < 2: |
|
2500 | 2502 | parents.append(repo[nullid]) |
|
2501 | 2503 | p1, p2 = self._parents = parents |
|
2502 | 2504 | |
|
2503 | 2505 | # sanity check to ensure that the reused manifest parents are |
|
2504 | 2506 | # manifests of our commit parents |
|
2505 | 2507 | mp1, mp2 = self.manifestctx().parents |
|
2506 | 2508 | if p1 != nullid and p1.manifestnode() != mp1: |
|
2507 | 2509 | raise RuntimeError(r"can't reuse the manifest: its p1 " |
|
2508 | 2510 | r"doesn't match the new ctx p1") |
|
2509 | 2511 | if p2 != nullid and p2.manifestnode() != mp2: |
|
2510 | 2512 | raise RuntimeError(r"can't reuse the manifest: " |
|
2511 | 2513 | r"its p2 doesn't match the new ctx p2") |
|
2512 | 2514 | |
|
2513 | 2515 | self._files = originalctx.files() |
|
2514 | 2516 | self.substate = {} |
|
2515 | 2517 | |
|
2516 | 2518 | if editor: |
|
2517 | 2519 | self._text = editor(self._repo, self, []) |
|
2518 | 2520 | self._repo.savecommitmessage(self._text) |
|
2519 | 2521 | |
|
2520 | 2522 | def manifestnode(self): |
|
2521 | 2523 | return self._manifestnode |
|
2522 | 2524 | |
|
2523 | 2525 | @property |
|
2524 | 2526 | def _manifestctx(self): |
|
2525 | 2527 | return self._repo.manifestlog[self._manifestnode] |
|
2526 | 2528 | |
|
2527 | 2529 | def filectx(self, path, filelog=None): |
|
2528 | 2530 | return self._originalctx.filectx(path, filelog=filelog) |
|
2529 | 2531 | |
|
2530 | 2532 | def commit(self): |
|
2531 | 2533 | """commit context to the repo""" |
|
2532 | 2534 | return self._repo.commitctx(self) |
|
2533 | 2535 | |
|
2534 | 2536 | @property |
|
2535 | 2537 | def _manifest(self): |
|
2536 | 2538 | return self._originalctx.manifest() |
|
2537 | 2539 | |
|
2538 | 2540 | @propertycache |
|
2539 | 2541 | def _status(self): |
|
2540 | 2542 | """Calculate exact status from ``files`` specified in the ``origctx`` |
|
2541 | 2543 | and parents manifests. |
|
2542 | 2544 | """ |
|
2543 | 2545 | man1 = self.p1().manifest() |
|
2544 | 2546 | p2 = self._parents[1] |
|
2545 | 2547 | # "1 < len(self._parents)" can't be used for checking |
|
2546 | 2548 | # existence of the 2nd parent, because "metadataonlyctx._parents" is |
|
2547 | 2549 | # explicitly initialized with a list of length 2.
|
2548 | 2550 | if p2.node() != nullid: |
|
2549 | 2551 | man2 = p2.manifest() |
|
2550 | 2552 | managing = lambda f: f in man1 or f in man2 |
|
2551 | 2553 | else: |
|
2552 | 2554 | managing = lambda f: f in man1 |
|
2553 | 2555 | |
|
2554 | 2556 | modified, added, removed = [], [], [] |
|
2555 | 2557 | for f in self._files: |
|
2556 | 2558 | if not managing(f): |
|
2557 | 2559 | added.append(f) |
|
2558 | 2560 | elif f in self: |
|
2559 | 2561 | modified.append(f) |
|
2560 | 2562 | else: |
|
2561 | 2563 | removed.append(f) |
|
2562 | 2564 | |
|
2563 | 2565 | return scmutil.status(modified, added, removed, [], [], [], []) |
|
2564 | 2566 | |
|
2565 | 2567 | class arbitraryfilectx(object): |
|
2566 | 2568 | """Allows you to use filectx-like functions on a file in an arbitrary |
|
2567 | 2569 | location on disk, possibly not in the working directory. |
|
2568 | 2570 | """ |
|
2569 | 2571 | def __init__(self, path, repo=None): |
|
2570 | 2572 | # Repo is optional because contrib/simplemerge uses this class. |
|
2571 | 2573 | self._repo = repo |
|
2572 | 2574 | self._path = path |
|
2573 | 2575 | |
|
2574 | 2576 | def cmp(self, fctx): |
|
2575 | 2577 | # filecmp follows symlinks whereas `cmp` should not, so skip the fast |
|
2576 | 2578 | # path if either side is a symlink. |
|
2577 | 2579 | symlinks = ('l' in self.flags() or 'l' in fctx.flags()) |
|
2578 | 2580 | if not symlinks and isinstance(fctx, workingfilectx) and self._repo: |
|
2579 | 2581 | # Add a fast-path for merge if both sides are disk-backed. |
|
2580 | 2582 | # Note that filecmp uses the opposite return values (True if same) |
|
2581 | 2583 | # from our cmp functions (True if different). |
|
2582 | 2584 | return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path())) |
|
2583 | 2585 | return self.data() != fctx.data() |
|
2584 | 2586 | |
|
2585 | 2587 | def path(self): |
|
2586 | 2588 | return self._path |
|
2587 | 2589 | |
|
2588 | 2590 | def flags(self): |
|
2589 | 2591 | return '' |
|
2590 | 2592 | |
|
2591 | 2593 | def data(self): |
|
2592 | 2594 | return util.readfile(self._path) |
|
2593 | 2595 | |
|
2594 | 2596 | def decodeddata(self): |
|
2595 | 2597 | with open(self._path, "rb") as f: |
|
2596 | 2598 | return f.read() |
|
2597 | 2599 | |
|
2598 | 2600 | def remove(self): |
|
2599 | 2601 | util.unlink(self._path) |
|
2600 | 2602 | |
|
2601 | 2603 | def write(self, data, flags, **kwargs): |
|
2602 | 2604 | assert not flags |
|
2603 | 2605 | with open(self._path, "wb") as f: |
|
2604 | 2606 | f.write(data) |
@@ -1,1506 +1,1518 | |||
|
1 | 1 | # dirstate.py - working directory tracking for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import contextlib |
|
12 | 12 | import errno |
|
13 | 13 | import os |
|
14 | 14 | import stat |
|
15 | 15 | |
|
16 | 16 | from .i18n import _ |
|
17 | 17 | from .node import nullid |
|
18 | 18 | from . import ( |
|
19 | 19 | encoding, |
|
20 | 20 | error, |
|
21 | 21 | match as matchmod, |
|
22 | 22 | pathutil, |
|
23 | 23 | policy, |
|
24 | 24 | pycompat, |
|
25 | 25 | scmutil, |
|
26 | 26 | txnutil, |
|
27 | 27 | util, |
|
28 | 28 | ) |
|
29 | 29 | |
|
30 | 30 | parsers = policy.importmod(r'parsers') |
|
31 | 31 | dirstatemod = policy.importrust(r'dirstate', default=parsers) |
|
32 | 32 | |
|
33 | 33 | propertycache = util.propertycache |
|
34 | 34 | filecache = scmutil.filecache |
|
35 | 35 | _rangemask = 0x7fffffff |
|
36 | 36 | |
|
37 | 37 | dirstatetuple = parsers.dirstatetuple |
|
38 | 38 | |
|
39 | 39 | class repocache(filecache): |
|
40 | 40 | """filecache for files in .hg/""" |
|
41 | 41 | def join(self, obj, fname): |
|
42 | 42 | return obj._opener.join(fname) |
|
43 | 43 | |
|
44 | 44 | class rootcache(filecache): |
|
45 | 45 | """filecache for files in the repository root""" |
|
46 | 46 | def join(self, obj, fname): |
|
47 | 47 | return obj._join(fname) |
|
48 | 48 | |
|
49 | 49 | def _getfsnow(vfs): |
|
50 | 50 | '''Get "now" timestamp on filesystem''' |
|
51 | 51 | tmpfd, tmpname = vfs.mkstemp() |
|
52 | 52 | try: |
|
53 | 53 | return os.fstat(tmpfd)[stat.ST_MTIME] |
|
54 | 54 | finally: |
|
55 | 55 | os.close(tmpfd) |
|
56 | 56 | vfs.unlink(tmpname) |
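
The same trick in isolation: the filesystem's clock (e.g. over NFS) can disagree with the local ``time.time()``, so "now" is read off a freshly created file instead. A standalone sketch:

    import os
    import stat
    import tempfile

    def fsnow(dirpath):
        # The mtime of a brand-new temp file is the filesystem's "now".
        fd, name = tempfile.mkstemp(dir=dirpath)
        try:
            return os.fstat(fd)[stat.ST_MTIME]
        finally:
            os.close(fd)
            os.unlink(name)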
|
57 | 57 | |
|
58 | 58 | class dirstate(object): |
|
59 | 59 | |
|
60 | 60 | def __init__(self, opener, ui, root, validate, sparsematchfn): |
|
61 | 61 | '''Create a new dirstate object. |
|
62 | 62 | |
|
63 | 63 | opener is an open()-like callable that can be used to open the |
|
64 | 64 | dirstate file; root is the root of the directory tracked by |
|
65 | 65 | the dirstate. |
|
66 | 66 | ''' |
|
67 | 67 | self._opener = opener |
|
68 | 68 | self._validate = validate |
|
69 | 69 | self._root = root |
|
70 | 70 | self._sparsematchfn = sparsematchfn |
|
71 | 71 | # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is |
|
72 | 72 | # UNC path pointing to root share (issue4557) |
|
73 | 73 | self._rootdir = pathutil.normasprefix(root) |
|
74 | 74 | self._dirty = False |
|
75 | 75 | self._lastnormaltime = 0 |
|
76 | 76 | self._ui = ui |
|
77 | 77 | self._filecache = {} |
|
78 | 78 | self._parentwriters = 0 |
|
79 | 79 | self._filename = 'dirstate' |
|
80 | 80 | self._pendingfilename = '%s.pending' % self._filename |
|
81 | 81 | self._plchangecallbacks = {} |
|
82 | 82 | self._origpl = None |
|
83 | 83 | self._updatedfiles = set() |
|
84 | 84 | self._mapcls = dirstatemap |
|
85 | 85 | # Access and cache cwd early, so we don't access it for the first time |
|
86 | 86 | # after a working-copy update caused it to not exist (accessing it then |
|
87 | 87 | # raises an exception). |
|
88 | 88 | self._cwd |
|
89 | 89 | |
|
90 | 90 | @contextlib.contextmanager |
|
91 | 91 | def parentchange(self): |
|
92 | 92 | '''Context manager for handling dirstate parents. |
|
93 | 93 | |
|
94 | 94 | If an exception occurs in the scope of the context manager, |
|
95 | 95 | the incoherent dirstate won't be written when wlock is |
|
96 | 96 | released. |
|
97 | 97 | ''' |
|
98 | 98 | self._parentwriters += 1 |
|
99 | 99 | yield |
|
100 | 100 | # Typically we want the "undo" step of a context manager in a |
|
101 | 101 | # finally block so it happens even when an exception |
|
102 | 102 | # occurs. In this case, however, we only want to decrement |
|
103 | 103 | # parentwriters if the code in the with statement exits |
|
104 | 104 | # normally, so we don't have a try/finally here on purpose. |
|
105 | 105 | self._parentwriters -= 1 |
|
106 | 106 | |
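
Intended usage, given the guard in ``setparents`` below (hypothetical ``ds`` dirstate, commented out since it needs a live repo):

    # with ds.parentchange():
    #     ds.setparents(newp1node)   # refused outside the context manager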
|
107 | 107 | def pendingparentchange(self): |
|
108 | 108 | '''Returns true if the dirstate is in the middle of a set of changes |
|
109 | 109 | that modify the dirstate parent. |
|
110 | 110 | ''' |
|
111 | 111 | return self._parentwriters > 0 |
|
112 | 112 | |
|
113 | 113 | @propertycache |
|
114 | 114 | def _map(self): |
|
115 | 115 | """Return the dirstate contents (see documentation for dirstatemap).""" |
|
116 | 116 | self._map = self._mapcls(self._ui, self._opener, self._root) |
|
117 | 117 | return self._map |
|
118 | 118 | |
|
119 | 119 | @property |
|
120 | 120 | def _sparsematcher(self): |
|
121 | 121 | """The matcher for the sparse checkout. |
|
122 | 122 | |
|
123 | 123 | The working directory may not include every file from a manifest. The |
|
124 | 124 | matcher obtained by this property will match a path if it is to be |
|
125 | 125 | included in the working directory. |
|
126 | 126 | """ |
|
127 | 127 | # TODO there is potential to cache this property. For now, the matcher |
|
128 | 128 | # is resolved on every access. (But the called function does use a |
|
129 | 129 | # cache to keep the lookup fast.) |
|
130 | 130 | return self._sparsematchfn() |
|
131 | 131 | |
|
132 | 132 | @repocache('branch') |
|
133 | 133 | def _branch(self): |
|
134 | 134 | try: |
|
135 | 135 | return self._opener.read("branch").strip() or "default" |
|
136 | 136 | except IOError as inst: |
|
137 | 137 | if inst.errno != errno.ENOENT: |
|
138 | 138 | raise |
|
139 | 139 | return "default" |
|
140 | 140 | |
|
141 | 141 | @property |
|
142 | 142 | def _pl(self): |
|
143 | 143 | return self._map.parents() |
|
144 | 144 | |
|
145 | 145 | def hasdir(self, d): |
|
146 | 146 | return self._map.hastrackeddir(d) |
|
147 | 147 | |
|
148 | 148 | @rootcache('.hgignore') |
|
149 | 149 | def _ignore(self): |
|
150 | 150 | files = self._ignorefiles() |
|
151 | 151 | if not files: |
|
152 | 152 | return matchmod.never() |
|
153 | 153 | |
|
154 | 154 | pats = ['include:%s' % f for f in files] |
|
155 | 155 | return matchmod.match(self._root, '', [], pats, warn=self._ui.warn) |
|
156 | 156 | |
|
157 | 157 | @propertycache |
|
158 | 158 | def _slash(self): |
|
159 | 159 | return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/' |
|
160 | 160 | |
|
161 | 161 | @propertycache |
|
162 | 162 | def _checklink(self): |
|
163 | 163 | return util.checklink(self._root) |
|
164 | 164 | |
|
165 | 165 | @propertycache |
|
166 | 166 | def _checkexec(self): |
|
167 | 167 | return util.checkexec(self._root) |
|
168 | 168 | |
|
169 | 169 | @propertycache |
|
170 | 170 | def _checkcase(self): |
|
171 | 171 | return not util.fscasesensitive(self._join('.hg')) |
|
172 | 172 | |
|
173 | 173 | def _join(self, f): |
|
174 | 174 | # much faster than os.path.join() |
|
175 | 175 | # it's safe because f is always a relative path |
|
176 | 176 | return self._rootdir + f |
|
177 | 177 | |
|
178 | 178 | def flagfunc(self, buildfallback): |
|
179 | 179 | if self._checklink and self._checkexec: |
|
180 | 180 | def f(x): |
|
181 | 181 | try: |
|
182 | 182 | st = os.lstat(self._join(x)) |
|
183 | 183 | if util.statislink(st): |
|
184 | 184 | return 'l' |
|
185 | 185 | if util.statisexec(st): |
|
186 | 186 | return 'x' |
|
187 | 187 | except OSError: |
|
188 | 188 | pass |
|
189 | 189 | return '' |
|
190 | 190 | return f |
|
191 | 191 | |
|
192 | 192 | fallback = buildfallback() |
|
193 | 193 | if self._checklink: |
|
194 | 194 | def f(x): |
|
195 | 195 | if os.path.islink(self._join(x)): |
|
196 | 196 | return 'l' |
|
197 | 197 | if 'x' in fallback(x): |
|
198 | 198 | return 'x' |
|
199 | 199 | return '' |
|
200 | 200 | return f |
|
201 | 201 | if self._checkexec: |
|
202 | 202 | def f(x): |
|
203 | 203 | if 'l' in fallback(x): |
|
204 | 204 | return 'l' |
|
205 | 205 | if util.isexec(self._join(x)): |
|
206 | 206 | return 'x' |
|
207 | 207 | return '' |
|
208 | 208 | return f |
|
209 | 209 | else: |
|
210 | 210 | return fallback |
|
211 | 211 | |
|
212 | 212 | @propertycache |
|
213 | 213 | def _cwd(self): |
|
214 | 214 | # internal config: ui.forcecwd |
|
215 | 215 | forcecwd = self._ui.config('ui', 'forcecwd') |
|
216 | 216 | if forcecwd: |
|
217 | 217 | return forcecwd |
|
218 | 218 | return encoding.getcwd() |
|
219 | 219 | |
|
220 | 220 | def getcwd(self): |
|
221 | 221 | '''Return the path from which a canonical path is calculated. |
|
222 | 222 | |
|
223 | 223 | This path should be used to resolve file patterns or to convert |
|
224 | 224 | canonical paths back to file paths for display. It shouldn't be |
|
225 | 225 | used to get real file paths. Use vfs functions instead. |
|
226 | 226 | ''' |
|
227 | 227 | cwd = self._cwd |
|
228 | 228 | if cwd == self._root: |
|
229 | 229 | return '' |
|
230 | 230 | # self._root ends with a path separator if self._root is '/' or 'C:\' |
|
231 | 231 | rootsep = self._root |
|
232 | 232 | if not util.endswithsep(rootsep): |
|
233 | 233 | rootsep += pycompat.ossep |
|
234 | 234 | if cwd.startswith(rootsep): |
|
235 | 235 | return cwd[len(rootsep):] |
|
236 | 236 | else: |
|
237 | 237 | # we're outside the repo. return an absolute path. |
|
238 | 238 | return cwd |
|
239 | 239 | |
|
240 | 240 | def pathto(self, f, cwd=None): |
|
241 | 241 | if cwd is None: |
|
242 | 242 | cwd = self.getcwd() |
|
243 | 243 | path = util.pathto(self._root, cwd, f) |
|
244 | 244 | if self._slash: |
|
245 | 245 | return util.pconvert(path) |
|
246 | 246 | return path |
|
247 | 247 | |
|
248 | 248 | def __getitem__(self, key): |
|
249 | 249 | '''Return the current state of key (a filename) in the dirstate. |
|
250 | 250 | |
|
251 | 251 | States are: |
|
252 | 252 | n normal |
|
253 | 253 | m needs merging |
|
254 | 254 | r marked for removal |
|
255 | 255 | a marked for addition |
|
256 | 256 | ? not tracked |
|
257 | 257 | ''' |
|
258 | 258 | return self._map.get(key, ("?",))[0] |
|
259 | 259 | |
|
260 | 260 | def __contains__(self, key): |
|
261 | 261 | return key in self._map |
|
262 | 262 | |
|
263 | 263 | def __iter__(self): |
|
264 | 264 | return iter(sorted(self._map)) |
|
265 | 265 | |
|
266 | 266 | def items(self): |
|
267 | 267 | return self._map.iteritems() |
|
268 | 268 | |
|
269 | 269 | iteritems = items |
|
270 | 270 | |
|
271 | 271 | def parents(self): |
|
272 | 272 | return [self._validate(p) for p in self._pl] |
|
273 | 273 | |
|
274 | 274 | def p1(self): |
|
275 | 275 | return self._validate(self._pl[0]) |
|
276 | 276 | |
|
277 | 277 | def p2(self): |
|
278 | 278 | return self._validate(self._pl[1]) |
|
279 | 279 | |
|
280 | 280 | def branch(self): |
|
281 | 281 | return encoding.tolocal(self._branch) |
|
282 | 282 | |
|
283 | 283 | def setparents(self, p1, p2=nullid): |
|
284 | 284 | """Set dirstate parents to p1 and p2. |
|
285 | 285 | |
|
286 | 286 | When moving from two parents to one, 'm' merged entries are

287 | 287 | adjusted to normal and previous copy records are discarded and

288 | 288 | returned by the call.
|
289 | 289 | |
|
290 | 290 | See localrepo.setparents() |
|
291 | 291 | """ |
|
292 | 292 | if self._parentwriters == 0: |
|
293 | 293 | raise ValueError("cannot set dirstate parent outside of " |
|
294 | 294 | "dirstate.parentchange context manager") |
|
295 | 295 | |
|
296 | 296 | self._dirty = True |
|
297 | 297 | oldp2 = self._pl[1] |
|
298 | 298 | if self._origpl is None: |
|
299 | 299 | self._origpl = self._pl |
|
300 | 300 | self._map.setparents(p1, p2) |
|
301 | 301 | copies = {} |
|
302 | 302 | if oldp2 != nullid and p2 == nullid: |
|
303 | 303 | candidatefiles = self._map.nonnormalset.union( |
|
304 | 304 | self._map.otherparentset) |
|
305 | 305 | for f in candidatefiles: |
|
306 | 306 | s = self._map.get(f) |
|
307 | 307 | if s is None: |
|
308 | 308 | continue |
|
309 | 309 | |
|
310 | 310 | # Discard 'm' markers when moving away from a merge state |
|
311 | 311 | if s[0] == 'm': |
|
312 | 312 | source = self._map.copymap.get(f) |
|
313 | 313 | if source: |
|
314 | 314 | copies[f] = source |
|
315 | 315 | self.normallookup(f) |
|
316 | 316 | # Also fix up otherparent markers |
|
317 | 317 | elif s[0] == 'n' and s[2] == -2: |
|
318 | 318 | source = self._map.copymap.get(f) |
|
319 | 319 | if source: |
|
320 | 320 | copies[f] = source |
|
321 | 321 | self.add(f) |
|
322 | 322 | return copies |
|
323 | 323 | |
|
324 | 324 | def setbranch(self, branch): |
|
325 | 325 | self.__class__._branch.set(self, encoding.fromlocal(branch)) |
|
326 | 326 | f = self._opener('branch', 'w', atomictemp=True, checkambig=True) |
|
327 | 327 | try: |
|
328 | 328 | f.write(self._branch + '\n') |
|
329 | 329 | f.close() |
|
330 | 330 | |
|
331 | 331 | # make sure filecache has the correct stat info for _branch after |
|
332 | 332 | # replacing the underlying file |
|
333 | 333 | ce = self._filecache['_branch'] |
|
334 | 334 | if ce: |
|
335 | 335 | ce.refresh() |
|
336 | 336 | except: # re-raises |
|
337 | 337 | f.discard() |
|
338 | 338 | raise |
|
339 | 339 | |
|
340 | 340 | def invalidate(self): |
|
341 | 341 | '''Causes the next access to reread the dirstate. |
|
342 | 342 | |
|
343 | 343 | This is different from localrepo.invalidatedirstate() because it always |
|
344 | 344 | rereads the dirstate. Use localrepo.invalidatedirstate() if you want to |
|
345 | 345 | check whether the dirstate has changed before rereading it.''' |
|
346 | 346 | |
|
347 | 347 | for a in (r"_map", r"_branch", r"_ignore"): |
|
348 | 348 | if a in self.__dict__: |
|
349 | 349 | delattr(self, a) |
|
350 | 350 | self._lastnormaltime = 0 |
|
351 | 351 | self._dirty = False |
|
352 | 352 | self._updatedfiles.clear() |
|
353 | 353 | self._parentwriters = 0 |
|
354 | 354 | self._origpl = None |
|
355 | 355 | |
|
356 | 356 | def copy(self, source, dest): |
|
357 | 357 | """Mark dest as a copy of source. Unmark dest if source is None.""" |
|
358 | 358 | if source == dest: |
|
359 | 359 | return |
|
360 | 360 | self._dirty = True |
|
361 | 361 | if source is not None: |
|
362 | 362 | self._map.copymap[dest] = source |
|
363 | 363 | self._updatedfiles.add(source) |
|
364 | 364 | self._updatedfiles.add(dest) |
|
365 | 365 | elif self._map.copymap.pop(dest, None): |
|
366 | 366 | self._updatedfiles.add(dest) |
|
367 | 367 | |
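
The copymap semantics in isolation (standalone dict analogue): passing ``source=None`` unmarks a previously recorded copy:

    copymap = {}

    def copy(source, dest):
        if source == dest:
            return
        if source is not None:
            copymap[dest] = source
        else:
            copymap.pop(dest, None)

    copy(b'a', b'b')
    assert copymap == {b'b': b'a'}
    copy(None, b'b')
    assert copymap == {}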
|
368 | 368 | def copied(self, file): |
|
369 | 369 | return self._map.copymap.get(file, None) |
|
370 | 370 | |
|
371 | 371 | def copies(self): |
|
372 | 372 | return self._map.copymap |
|
373 | 373 | |
|
374 | 374 | def _addpath(self, f, state, mode, size, mtime): |
|
375 | 375 | oldstate = self[f] |
|
376 | 376 | if state == 'a' or oldstate == 'r': |
|
377 | 377 | scmutil.checkfilename(f) |
|
378 | 378 | if self._map.hastrackeddir(f): |
|
379 | 379 | raise error.Abort(_('directory %r already in dirstate') % |
|
380 | 380 | pycompat.bytestr(f)) |
|
381 | 381 | # shadows |
|
382 | 382 | for d in util.finddirs(f): |
|
383 | 383 | if self._map.hastrackeddir(d): |
|
384 | 384 | break |
|
385 | 385 | entry = self._map.get(d) |
|
386 | 386 | if entry is not None and entry[0] != 'r': |
|
387 | 387 | raise error.Abort( |
|
388 | 388 | _('file %r in dirstate clashes with %r') % |
|
389 | 389 | (pycompat.bytestr(d), pycompat.bytestr(f))) |
|
390 | 390 | self._dirty = True |
|
391 | 391 | self._updatedfiles.add(f) |
|
392 | 392 | self._map.addfile(f, oldstate, state, mode, size, mtime) |
|
393 | 393 | |
|
394 | def normal(self, f): | |
|
395 | '''Mark a file normal and clean.''' |

394 | def normal(self, f, parentfiledata=None): | |
|
395 | '''Mark a file normal and clean. | |
|
396 | ||
|
397 | parentfiledata: (mode, size, mtime) of the clean file | |
|
398 | ||
|
399 | parentfiledata should be computed from memory (for mode, | |
|
400 | size), at or as close as possible to the point where we |
|
401 | determined the file was clean, to limit the risk of the | |
|
402 | file having been changed by an external process between the | |
|
403 | moment where the file was determined to be clean and now.''' | |
|
404 | if parentfiledata: | |
|
405 | (mode, size, mtime) = parentfiledata | |
|
406 | else: | |
|
396 | 407 | s = os.lstat(self._join(f)) |
|
408 | mode = s.st_mode | |
|
409 | size = s.st_size | |
|
397 | 410 | mtime = s[stat.ST_MTIME] |
|
398 | self._addpath(f, 'n', s.st_mode, |

399 | s.st_size & _rangemask, mtime & _rangemask) | |
|
411 | self._addpath(f, 'n', mode, size & _rangemask, mtime & _rangemask) | |
|
400 | 412 | self._map.copymap.pop(f, None) |
|
401 | 413 | if f in self._map.nonnormalset: |
|
402 | 414 | self._map.nonnormalset.remove(f) |
|
403 | 415 | if mtime > self._lastnormaltime: |
|
404 | 416 | # Remember the most recent modification timeslot for status(), |
|
405 | 417 | # to make sure we won't miss future size-preserving file content |
|
406 | 418 | # modifications that happen within the same timeslot. |
|
407 | 419 | self._lastnormaltime = mtime |
|
408 | 420 | |
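
A hedged sketch of how a caller could use the new ``parentfiledata`` argument: capture the stat fields at the moment the file is judged clean, so ``normal()`` does not have to ``lstat()`` again later (the ``ds`` call is hypothetical):

    import os
    import stat

    def filedata(path):
        # Captured while the file is being checked for cleanliness.
        st = os.lstat(path)
        return (st.st_mode, st.st_size, st[stat.ST_MTIME])

    # Later, under the same wlock:
    # ds.normal(b'some/file', parentfiledata=filedata(b'some/file'))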
|
409 | 421 | def normallookup(self, f): |
|
410 | 422 | '''Mark a file normal, but possibly dirty.''' |
|
411 | 423 | if self._pl[1] != nullid: |
|
412 | 424 | # if there is a merge going on and the file was either |
|
413 | 425 | # in state 'm' (-1) or coming from other parent (-2) before |
|
414 | 426 | # being removed, restore that state. |
|
415 | 427 | entry = self._map.get(f) |
|
416 | 428 | if entry is not None: |
|
417 | 429 | if entry[0] == 'r' and entry[2] in (-1, -2): |
|
418 | 430 | source = self._map.copymap.get(f) |
|
419 | 431 | if entry[2] == -1: |
|
420 | 432 | self.merge(f) |
|
421 | 433 | elif entry[2] == -2: |
|
422 | 434 | self.otherparent(f) |
|
423 | 435 | if source: |
|
424 | 436 | self.copy(source, f) |
|
425 | 437 | return |
|
426 | 438 | if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2: |
|
427 | 439 | return |
|
428 | 440 | self._addpath(f, 'n', 0, -1, -1) |
|
429 | 441 | self._map.copymap.pop(f, None) |
|
430 | 442 | |
|
431 | 443 | def otherparent(self, f): |
|
432 | 444 | '''Mark as coming from the other parent, always dirty.''' |
|
433 | 445 | if self._pl[1] == nullid: |
|
434 | 446 | raise error.Abort(_("setting %r to other parent " |
|
435 | 447 | "only allowed in merges") % f) |
|
436 | 448 | if f in self and self[f] == 'n': |
|
437 | 449 | # merge-like |
|
438 | 450 | self._addpath(f, 'm', 0, -2, -1) |
|
439 | 451 | else: |
|
440 | 452 | # add-like |
|
441 | 453 | self._addpath(f, 'n', 0, -2, -1) |
|
442 | 454 | self._map.copymap.pop(f, None) |
|
443 | 455 | |
|
444 | 456 | def add(self, f): |
|
445 | 457 | '''Mark a file added.''' |
|
446 | 458 | self._addpath(f, 'a', 0, -1, -1) |
|
447 | 459 | self._map.copymap.pop(f, None) |
|
448 | 460 | |
|
449 | 461 | def remove(self, f): |
|
450 | 462 | '''Mark a file removed.''' |
|
451 | 463 | self._dirty = True |
|
452 | 464 | oldstate = self[f] |
|
453 | 465 | size = 0 |
|
454 | 466 | if self._pl[1] != nullid: |
|
455 | 467 | entry = self._map.get(f) |
|
456 | 468 | if entry is not None: |
|
457 | 469 | # backup the previous state |
|
458 | 470 | if entry[0] == 'm': # merge |
|
459 | 471 | size = -1 |
|
460 | 472 | elif entry[0] == 'n' and entry[2] == -2: # other parent |
|
461 | 473 | size = -2 |
|
462 | 474 | self._map.otherparentset.add(f) |
|
463 | 475 | self._updatedfiles.add(f) |
|
464 | 476 | self._map.removefile(f, oldstate, size) |
|
465 | 477 | if size == 0: |
|
466 | 478 | self._map.copymap.pop(f, None) |
|
467 | 479 | |
|
468 | 480 | def merge(self, f): |
|
469 | 481 | '''Mark a file merged.''' |
|
470 | 482 | if self._pl[1] == nullid: |
|
471 | 483 | return self.normallookup(f) |
|
472 | 484 | return self.otherparent(f) |
|
473 | 485 | |
|
474 | 486 | def drop(self, f): |
|
475 | 487 | '''Drop a file from the dirstate''' |
|
476 | 488 | oldstate = self[f] |
|
477 | 489 | if self._map.dropfile(f, oldstate): |
|
478 | 490 | self._dirty = True |
|
479 | 491 | self._updatedfiles.add(f) |
|
480 | 492 | self._map.copymap.pop(f, None) |
|
481 | 493 | |
|
482 | 494 | def _discoverpath(self, path, normed, ignoremissing, exists, storemap): |
|
483 | 495 | if exists is None: |
|
484 | 496 | exists = os.path.lexists(os.path.join(self._root, path)) |
|
485 | 497 | if not exists: |
|
486 | 498 | # Maybe a path component exists |
|
487 | 499 | if not ignoremissing and '/' in path: |
|
488 | 500 | d, f = path.rsplit('/', 1) |
|
489 | 501 | d = self._normalize(d, False, ignoremissing, None) |
|
490 | 502 | folded = d + "/" + f |
|
491 | 503 | else: |
|
492 | 504 | # No path components, preserve original case |
|
493 | 505 | folded = path |
|
494 | 506 | else: |
|
495 | 507 | # recursively normalize leading directory components |
|
496 | 508 | # against dirstate |
|
497 | 509 | if '/' in normed: |
|
498 | 510 | d, f = normed.rsplit('/', 1) |
|
499 | 511 | d = self._normalize(d, False, ignoremissing, True) |
|
500 | 512 | r = self._root + "/" + d |
|
501 | 513 | folded = d + "/" + util.fspath(f, r) |
|
502 | 514 | else: |
|
503 | 515 | folded = util.fspath(normed, self._root) |
|
504 | 516 | storemap[normed] = folded |
|
505 | 517 | |
|
506 | 518 | return folded |
|
507 | 519 | |
|
508 | 520 | def _normalizefile(self, path, isknown, ignoremissing=False, exists=None): |
|
509 | 521 | normed = util.normcase(path) |
|
510 | 522 | folded = self._map.filefoldmap.get(normed, None) |
|
511 | 523 | if folded is None: |
|
512 | 524 | if isknown: |
|
513 | 525 | folded = path |
|
514 | 526 | else: |
|
515 | 527 | folded = self._discoverpath(path, normed, ignoremissing, exists, |
|
516 | 528 | self._map.filefoldmap) |
|
517 | 529 | return folded |
|
518 | 530 | |
|
519 | 531 | def _normalize(self, path, isknown, ignoremissing=False, exists=None): |
|
520 | 532 | normed = util.normcase(path) |
|
521 | 533 | folded = self._map.filefoldmap.get(normed, None) |
|
522 | 534 | if folded is None: |
|
523 | 535 | folded = self._map.dirfoldmap.get(normed, None) |
|
524 | 536 | if folded is None: |
|
525 | 537 | if isknown: |
|
526 | 538 | folded = path |
|
527 | 539 | else: |
|
528 | 540 | # store discovered result in dirfoldmap so that future |
|
529 | 541 | # normalizefile calls don't start matching directories |
|
530 | 542 | folded = self._discoverpath(path, normed, ignoremissing, exists, |
|
531 | 543 | self._map.dirfoldmap) |
|
532 | 544 | return folded |
|
533 | 545 | |
|
534 | 546 | def normalize(self, path, isknown=False, ignoremissing=False): |
|
535 | 547 | ''' |
|
536 | 548 | normalize the case of a pathname when on a casefolding filesystem |
|
537 | 549 | |
|
538 | 550 | isknown specifies whether the filename came from walking the |
|
539 | 551 | disk, to avoid extra filesystem access. |
|
540 | 552 | |
|
541 | 553 | If ignoremissing is True, missing paths are returned
|
542 | 554 | unchanged. Otherwise, we try harder to normalize possibly |
|
543 | 555 | existing path components. |
|
544 | 556 | |
|
545 | 557 | The normalized case is determined based on the following precedence: |
|
546 | 558 | |
|
547 | 559 | - version of name already stored in the dirstate |
|
548 | 560 | - version of name stored on disk |
|
549 | 561 | - version provided via command arguments |
|
550 | 562 | ''' |
|
551 | 563 | |
|
552 | 564 | if self._checkcase: |
|
553 | 565 | return self._normalize(path, isknown, ignoremissing) |
|
554 | 566 | return path |
|
555 | 567 | |
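
A standalone analogue of the fold-map lookup behind ``_normalizefile``/``_normalize``, using ``os.path.normcase`` in place of the ``util.normcase`` call in the code above (an assumption made for the sketch):

    import os.path

    stored = [b'Docs/ReadMe.txt']   # spellings already known to the dirstate
    foldmap = {os.path.normcase(p): p for p in stored}

    def normalize(path):
        # Prefer the stored spelling; otherwise return the input unchanged.
        return foldmap.get(os.path.normcase(path), path)

    # On a case-folding filesystem (where normcase lowercases), this maps
    # b'docs/readme.txt' back to b'Docs/ReadMe.txt'; elsewhere it's a no-op.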
|
556 | 568 | def clear(self): |
|
557 | 569 | self._map.clear() |
|
558 | 570 | self._lastnormaltime = 0 |
|
559 | 571 | self._updatedfiles.clear() |
|
560 | 572 | self._dirty = True |
|
561 | 573 | |
|
562 | 574 | def rebuild(self, parent, allfiles, changedfiles=None): |
|
563 | 575 | if changedfiles is None: |
|
564 | 576 | # Rebuild entire dirstate |
|
565 | 577 | changedfiles = allfiles |
|
566 | 578 | lastnormaltime = self._lastnormaltime |
|
567 | 579 | self.clear() |
|
568 | 580 | self._lastnormaltime = lastnormaltime |
|
569 | 581 | |
|
570 | 582 | if self._origpl is None: |
|
571 | 583 | self._origpl = self._pl |
|
572 | 584 | self._map.setparents(parent, nullid) |
|
573 | 585 | for f in changedfiles: |
|
574 | 586 | if f in allfiles: |
|
575 | 587 | self.normallookup(f) |
|
576 | 588 | else: |
|
577 | 589 | self.drop(f) |
|
578 | 590 | |
|
579 | 591 | self._dirty = True |
|
580 | 592 | |
|
581 | 593 | def identity(self): |
|
582 | 594 |         '''Return the identity of the dirstate itself, to detect changes in storage |

583 | 595 |         If the identity of the previous dirstate equals this one, writing |

584 | 596 |         changes out based on the former dirstate can preserve consistency. |
|
585 | 597 | changes based on the former dirstate out can keep consistency. |
|
586 | 598 | ''' |
|
587 | 599 | return self._map.identity |
|
588 | 600 | |
|
589 | 601 | def write(self, tr): |
|
590 | 602 | if not self._dirty: |
|
591 | 603 | return |
|
592 | 604 | |
|
593 | 605 | filename = self._filename |
|
594 | 606 | if tr: |
|
595 | 607 | # 'dirstate.write()' is not only for writing in-memory |
|
596 | 608 |             # changes out, but also for dropping ambiguous timestamps. |

597 | 609 |             # delayed writing re-raises the "ambiguous timestamp issue". |

598 | 610 |             # See also the wiki page below for details: |
|
599 | 611 | # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan |
|
600 | 612 | |
|
601 | 613 | # emulate dropping timestamp in 'parsers.pack_dirstate' |
|
602 | 614 | now = _getfsnow(self._opener) |
|
603 | 615 | self._map.clearambiguoustimes(self._updatedfiles, now) |
|
604 | 616 | |
|
605 | 617 | # emulate that all 'dirstate.normal' results are written out |
|
606 | 618 | self._lastnormaltime = 0 |
|
607 | 619 | self._updatedfiles.clear() |
|
608 | 620 | |
|
609 | 621 | # delay writing in-memory changes out |
|
610 | 622 | tr.addfilegenerator('dirstate', (self._filename,), |
|
611 | 623 | self._writedirstate, location='plain') |
|
612 | 624 | return |
|
613 | 625 | |
|
614 | 626 | st = self._opener(filename, "w", atomictemp=True, checkambig=True) |
|
615 | 627 | self._writedirstate(st) |
|
616 | 628 | |
|
617 | 629 | def addparentchangecallback(self, category, callback): |
|
618 | 630 | """add a callback to be called when the wd parents are changed |
|
619 | 631 | |
|
620 | 632 | Callback will be called with the following arguments: |
|
621 | 633 | dirstate, (oldp1, oldp2), (newp1, newp2) |
|
622 | 634 | |
|
623 | 635 | Category is a unique identifier to allow overwriting an old callback |
|
624 | 636 | with a newer callback. |
|
625 | 637 | """ |
|
626 | 638 | self._plchangecallbacks[category] = callback |
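
A hedged usage sketch for the registration API above; `repo` is assumed to be an open repository whose `dirstate` attribute is a real dirstate instance, and the category string is arbitrary:

```python
# Hypothetical usage of addparentchangecallback(): registering under the
# same category a second time replaces the earlier callback.
def log_parent_change(dirstate, oldparents, newparents):
    print('parents changed: %r -> %r' % (oldparents, newparents))

# assuming 'repo' is an open repository object:
# repo.dirstate.addparentchangecallback('parent-logger', log_parent_change)
```
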
|
627 | 639 | |
|
628 | 640 | def _writedirstate(self, st): |
|
629 | 641 | # notify callbacks about parents change |
|
630 | 642 | if self._origpl is not None and self._origpl != self._pl: |
|
631 | 643 | for c, callback in sorted(self._plchangecallbacks.iteritems()): |
|
632 | 644 | callback(self, self._origpl, self._pl) |
|
633 | 645 | self._origpl = None |
|
634 | 646 | # use the modification time of the newly created temporary file as the |
|
635 | 647 | # filesystem's notion of 'now' |
|
636 | 648 | now = util.fstat(st)[stat.ST_MTIME] & _rangemask |
|
637 | 649 | |
|
638 | 650 |         # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping |

639 | 651 |         # the timestamp of each entry in the dirstate, because of 'now > mtime' |
|
640 | 652 | delaywrite = self._ui.configint('debug', 'dirstate.delaywrite') |
|
641 | 653 | if delaywrite > 0: |
|
642 | 654 | # do we have any files to delay for? |
|
643 | 655 | for f, e in self._map.iteritems(): |
|
644 | 656 | if e[0] == 'n' and e[3] == now: |
|
645 | 657 |                     import time # imported here to avoid a needless module-level import |
|
646 | 658 | # rather than sleep n seconds, sleep until the next |
|
647 | 659 | # multiple of n seconds |
|
648 | 660 | clock = time.time() |
|
649 | 661 | start = int(clock) - (int(clock) % delaywrite) |
|
650 | 662 | end = start + delaywrite |
|
651 | 663 | time.sleep(end - clock) |
|
652 | 664 | now = end # trust our estimate that the end is near now |
|
653 | 665 | break |
|
654 | 666 | |
|
655 | 667 | self._map.write(st, now) |
|
656 | 668 | self._lastnormaltime = 0 |
|
657 | 669 | self._dirty = False |
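
The delaywrite branch above does not sleep a flat n seconds; it rounds the clock up to the next multiple of n. A minimal standalone sketch of that arithmetic (the function name is invented for illustration):

```python
import time

def sleep_to_next_multiple(n):
    # round the current clock down to the most recent multiple of n, then
    # sleep until the following multiple -- the same arithmetic as the
    # delaywrite branch above (illustrative only)
    clock = time.time()
    start = int(clock) - (int(clock) % n)
    end = start + n
    time.sleep(end - clock)
    return end  # a good estimate of "now" right after waking

# e.g. sleep_to_next_multiple(2) wakes on an even-second boundary
```
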
|
658 | 670 | |
|
659 | 671 | def _dirignore(self, f): |
|
660 | 672 | if self._ignore(f): |
|
661 | 673 | return True |
|
662 | 674 | for p in util.finddirs(f): |
|
663 | 675 | if self._ignore(p): |
|
664 | 676 | return True |
|
665 | 677 | return False |
|
666 | 678 | |
|
667 | 679 | def _ignorefiles(self): |
|
668 | 680 | files = [] |
|
669 | 681 | if os.path.exists(self._join('.hgignore')): |
|
670 | 682 | files.append(self._join('.hgignore')) |
|
671 | 683 | for name, path in self._ui.configitems("ui"): |
|
672 | 684 | if name == 'ignore' or name.startswith('ignore.'): |
|
673 | 685 | # we need to use os.path.join here rather than self._join |
|
674 | 686 | # because path is arbitrary and user-specified |
|
675 | 687 | files.append(os.path.join(self._rootdir, util.expandpath(path))) |
|
676 | 688 | return files |
|
677 | 689 | |
|
678 | 690 | def _ignorefileandline(self, f): |
|
679 | 691 | files = collections.deque(self._ignorefiles()) |
|
680 | 692 | visited = set() |
|
681 | 693 | while files: |
|
682 | 694 | i = files.popleft() |
|
683 | 695 | patterns = matchmod.readpatternfile(i, self._ui.warn, |
|
684 | 696 | sourceinfo=True) |
|
685 | 697 | for pattern, lineno, line in patterns: |
|
686 | 698 | kind, p = matchmod._patsplit(pattern, 'glob') |
|
687 | 699 | if kind == "subinclude": |
|
688 | 700 | if p not in visited: |
|
689 | 701 | files.append(p) |
|
690 | 702 | continue |
|
691 | 703 | m = matchmod.match(self._root, '', [], [pattern], |
|
692 | 704 | warn=self._ui.warn) |
|
693 | 705 | if m(f): |
|
694 | 706 | return (i, lineno, line) |
|
695 | 707 | visited.add(i) |
|
696 | 708 | return (None, -1, "") |
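
The method above is essentially a breadth-first traversal over ignore files, where `subinclude:` patterns enqueue further files and a visited set stops cycles. A self-contained sketch of the same traversal shape over an invented include graph:

```python
import collections

# Hypothetical include graph: each ignore file maps to the files it
# subincludes; note the deliberate cycle back to '.hgignore'.
includes = {'.hgignore': ['sub/.hgignore'], 'sub/.hgignore': ['.hgignore']}

def walk_ignorefiles(start):
    files = collections.deque([start])
    visited = set()
    order = []
    while files:
        i = files.popleft()
        if i in visited:
            continue                      # cycle: file already scanned
        order.append(i)                   # real code scans patterns here
        visited.add(i)
        files.extend(includes.get(i, []))
    return order

print(walk_ignorefiles('.hgignore'))  # -> ['.hgignore', 'sub/.hgignore']
```
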
|
697 | 709 | |
|
698 | 710 | def _walkexplicit(self, match, subrepos): |
|
699 | 711 | '''Get stat data about the files explicitly specified by match. |
|
700 | 712 | |
|
701 | 713 | Return a triple (results, dirsfound, dirsnotfound). |
|
702 | 714 | - results is a mapping from filename to stat result. It also contains |
|
703 | 715 | listings mapping subrepos and .hg to None. |
|
704 | 716 | - dirsfound is a list of files found to be directories. |
|
705 | 717 | - dirsnotfound is a list of files that the dirstate thinks are |
|
706 | 718 | directories and that were not found.''' |
|
707 | 719 | |
|
708 | 720 | def badtype(mode): |
|
709 | 721 | kind = _('unknown') |
|
710 | 722 | if stat.S_ISCHR(mode): |
|
711 | 723 | kind = _('character device') |
|
712 | 724 | elif stat.S_ISBLK(mode): |
|
713 | 725 | kind = _('block device') |
|
714 | 726 | elif stat.S_ISFIFO(mode): |
|
715 | 727 | kind = _('fifo') |
|
716 | 728 | elif stat.S_ISSOCK(mode): |
|
717 | 729 | kind = _('socket') |
|
718 | 730 | elif stat.S_ISDIR(mode): |
|
719 | 731 | kind = _('directory') |
|
720 | 732 | return _('unsupported file type (type is %s)') % kind |
|
721 | 733 | |
|
722 | 734 | matchedir = match.explicitdir |
|
723 | 735 | badfn = match.bad |
|
724 | 736 | dmap = self._map |
|
725 | 737 | lstat = os.lstat |
|
726 | 738 | getkind = stat.S_IFMT |
|
727 | 739 | dirkind = stat.S_IFDIR |
|
728 | 740 | regkind = stat.S_IFREG |
|
729 | 741 | lnkkind = stat.S_IFLNK |
|
730 | 742 | join = self._join |
|
731 | 743 | dirsfound = [] |
|
732 | 744 | foundadd = dirsfound.append |
|
733 | 745 | dirsnotfound = [] |
|
734 | 746 | notfoundadd = dirsnotfound.append |
|
735 | 747 | |
|
736 | 748 | if not match.isexact() and self._checkcase: |
|
737 | 749 | normalize = self._normalize |
|
738 | 750 | else: |
|
739 | 751 | normalize = None |
|
740 | 752 | |
|
741 | 753 | files = sorted(match.files()) |
|
742 | 754 | subrepos.sort() |
|
743 | 755 | i, j = 0, 0 |
|
744 | 756 | while i < len(files) and j < len(subrepos): |
|
745 | 757 | subpath = subrepos[j] + "/" |
|
746 | 758 | if files[i] < subpath: |
|
747 | 759 | i += 1 |
|
748 | 760 | continue |
|
749 | 761 | while i < len(files) and files[i].startswith(subpath): |
|
750 | 762 | del files[i] |
|
751 | 763 | j += 1 |
|
752 | 764 | |
|
753 | 765 | if not files or '' in files: |
|
754 | 766 | files = [''] |
|
755 | 767 | # constructing the foldmap is expensive, so don't do it for the |
|
756 | 768 | # common case where files is [''] |
|
757 | 769 | normalize = None |
|
758 | 770 | results = dict.fromkeys(subrepos) |
|
759 | 771 | results['.hg'] = None |
|
760 | 772 | |
|
761 | 773 | for ff in files: |
|
762 | 774 | if normalize: |
|
763 | 775 | nf = normalize(ff, False, True) |
|
764 | 776 | else: |
|
765 | 777 | nf = ff |
|
766 | 778 | if nf in results: |
|
767 | 779 | continue |
|
768 | 780 | |
|
769 | 781 | try: |
|
770 | 782 | st = lstat(join(nf)) |
|
771 | 783 | kind = getkind(st.st_mode) |
|
772 | 784 | if kind == dirkind: |
|
773 | 785 | if nf in dmap: |
|
774 | 786 | # file replaced by dir on disk but still in dirstate |
|
775 | 787 | results[nf] = None |
|
776 | 788 | if matchedir: |
|
777 | 789 | matchedir(nf) |
|
778 | 790 | foundadd((nf, ff)) |
|
779 | 791 | elif kind == regkind or kind == lnkkind: |
|
780 | 792 | results[nf] = st |
|
781 | 793 | else: |
|
782 | 794 | badfn(ff, badtype(kind)) |
|
783 | 795 | if nf in dmap: |
|
784 | 796 | results[nf] = None |
|
785 | 797 | except OSError as inst: # nf not found on disk - it is dirstate only |
|
786 | 798 | if nf in dmap: # does it exactly match a missing file? |
|
787 | 799 | results[nf] = None |
|
788 | 800 | else: # does it match a missing directory? |
|
789 | 801 | if self._map.hasdir(nf): |
|
790 | 802 | if matchedir: |
|
791 | 803 | matchedir(nf) |
|
792 | 804 | notfoundadd(nf) |
|
793 | 805 | else: |
|
794 | 806 | badfn(ff, encoding.strtolocal(inst.strerror)) |
|
795 | 807 | |
|
796 | 808 | # match.files() may contain explicitly-specified paths that shouldn't |
|
797 | 809 | # be taken; drop them from the list of files found. dirsfound/notfound |
|
798 | 810 | # aren't filtered here because they will be tested later. |
|
799 | 811 | if match.anypats(): |
|
800 | 812 | for f in list(results): |
|
801 | 813 | if f == '.hg' or f in subrepos: |
|
802 | 814 | # keep sentinel to disable further out-of-repo walks |
|
803 | 815 | continue |
|
804 | 816 | if not match(f): |
|
805 | 817 | del results[f] |
|
806 | 818 | |
|
807 | 819 | # Case insensitive filesystems cannot rely on lstat() failing to detect |
|
808 | 820 | # a case-only rename. Prune the stat object for any file that does not |
|
809 | 821 | # match the case in the filesystem, if there are multiple files that |
|
810 | 822 | # normalize to the same path. |
|
811 | 823 | if match.isexact() and self._checkcase: |
|
812 | 824 | normed = {} |
|
813 | 825 | |
|
814 | 826 | for f, st in results.iteritems(): |
|
815 | 827 | if st is None: |
|
816 | 828 | continue |
|
817 | 829 | |
|
818 | 830 | nc = util.normcase(f) |
|
819 | 831 | paths = normed.get(nc) |
|
820 | 832 | |
|
821 | 833 | if paths is None: |
|
822 | 834 | paths = set() |
|
823 | 835 | normed[nc] = paths |
|
824 | 836 | |
|
825 | 837 | paths.add(f) |
|
826 | 838 | |
|
827 | 839 | for norm, paths in normed.iteritems(): |
|
828 | 840 | if len(paths) > 1: |
|
829 | 841 | for path in paths: |
|
830 | 842 | folded = self._discoverpath(path, norm, True, None, |
|
831 | 843 | self._map.dirfoldmap) |
|
832 | 844 | if path != folded: |
|
833 | 845 | results[path] = None |
|
834 | 846 | |
|
835 | 847 | return results, dirsfound, dirsnotfound |
|
836 | 848 | |
|
837 | 849 | def walk(self, match, subrepos, unknown, ignored, full=True): |
|
838 | 850 | ''' |
|
839 | 851 | Walk recursively through the directory tree, finding all files |
|
840 | 852 | matched by match. |
|
841 | 853 | |
|
842 | 854 | If full is False, maybe skip some known-clean files. |
|
843 | 855 | |
|
844 | 856 | Return a dict mapping filename to stat-like object (either |
|
845 | 857 | mercurial.osutil.stat instance or return value of os.stat()). |
|
846 | 858 | |
|
847 | 859 | ''' |
|
848 | 860 | # full is a flag that extensions that hook into walk can use -- this |
|
849 | 861 | # implementation doesn't use it at all. This satisfies the contract |
|
850 | 862 | # because we only guarantee a "maybe". |
|
851 | 863 | |
|
852 | 864 | if ignored: |
|
853 | 865 | ignore = util.never |
|
854 | 866 | dirignore = util.never |
|
855 | 867 | elif unknown: |
|
856 | 868 | ignore = self._ignore |
|
857 | 869 | dirignore = self._dirignore |
|
858 | 870 | else: |
|
859 | 871 | # if not unknown and not ignored, drop dir recursion and step 2 |
|
860 | 872 | ignore = util.always |
|
861 | 873 | dirignore = util.always |
|
862 | 874 | |
|
863 | 875 | matchfn = match.matchfn |
|
864 | 876 | matchalways = match.always() |
|
865 | 877 | matchtdir = match.traversedir |
|
866 | 878 | dmap = self._map |
|
867 | 879 | listdir = util.listdir |
|
868 | 880 | lstat = os.lstat |
|
869 | 881 | dirkind = stat.S_IFDIR |
|
870 | 882 | regkind = stat.S_IFREG |
|
871 | 883 | lnkkind = stat.S_IFLNK |
|
872 | 884 | join = self._join |
|
873 | 885 | |
|
874 | 886 | exact = skipstep3 = False |
|
875 | 887 | if match.isexact(): # match.exact |
|
876 | 888 | exact = True |
|
877 | 889 | dirignore = util.always # skip step 2 |
|
878 | 890 | elif match.prefix(): # match.match, no patterns |
|
879 | 891 | skipstep3 = True |
|
880 | 892 | |
|
881 | 893 | if not exact and self._checkcase: |
|
882 | 894 | normalize = self._normalize |
|
883 | 895 | normalizefile = self._normalizefile |
|
884 | 896 | skipstep3 = False |
|
885 | 897 | else: |
|
886 | 898 | normalize = self._normalize |
|
887 | 899 | normalizefile = None |
|
888 | 900 | |
|
889 | 901 | # step 1: find all explicit files |
|
890 | 902 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
891 | 903 | |
|
892 | 904 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
893 | 905 | work = [d for d in work if not dirignore(d[0])] |
|
894 | 906 | |
|
895 | 907 | # step 2: visit subdirectories |
|
896 | 908 | def traverse(work, alreadynormed): |
|
897 | 909 | wadd = work.append |
|
898 | 910 | while work: |
|
899 | 911 | nd = work.pop() |
|
900 | 912 | visitentries = match.visitchildrenset(nd) |
|
901 | 913 | if not visitentries: |
|
902 | 914 | continue |
|
903 | 915 | if visitentries == 'this' or visitentries == 'all': |
|
904 | 916 | visitentries = None |
|
905 | 917 | skip = None |
|
906 | 918 | if nd != '': |
|
907 | 919 | skip = '.hg' |
|
908 | 920 | try: |
|
909 | 921 | entries = listdir(join(nd), stat=True, skip=skip) |
|
910 | 922 | except OSError as inst: |
|
911 | 923 | if inst.errno in (errno.EACCES, errno.ENOENT): |
|
912 | 924 | match.bad(self.pathto(nd), |
|
913 | 925 | encoding.strtolocal(inst.strerror)) |
|
914 | 926 | continue |
|
915 | 927 | raise |
|
916 | 928 | for f, kind, st in entries: |
|
917 | 929 | # Some matchers may return files in the visitentries set, |
|
918 | 930 | # instead of 'this', if the matcher explicitly mentions them |
|
919 | 931 | # and is not an exactmatcher. This is acceptable; we do not |
|
920 | 932 | # make any hard assumptions about file-or-directory below |
|
921 | 933 | # based on the presence of `f` in visitentries. If |
|
922 | 934 | # visitchildrenset returned a set, we can always skip the |
|
923 | 935 | # entries *not* in the set it provided regardless of whether |
|
924 | 936 | # they're actually a file or a directory. |
|
925 | 937 | if visitentries and f not in visitentries: |
|
926 | 938 | continue |
|
927 | 939 | if normalizefile: |
|
928 | 940 | # even though f might be a directory, we're only |
|
929 | 941 | # interested in comparing it to files currently in the |
|
930 | 942 | # dmap -- therefore normalizefile is enough |
|
931 | 943 | nf = normalizefile(nd and (nd + "/" + f) or f, True, |
|
932 | 944 | True) |
|
933 | 945 | else: |
|
934 | 946 | nf = nd and (nd + "/" + f) or f |
|
935 | 947 | if nf not in results: |
|
936 | 948 | if kind == dirkind: |
|
937 | 949 | if not ignore(nf): |
|
938 | 950 | if matchtdir: |
|
939 | 951 | matchtdir(nf) |
|
940 | 952 | wadd(nf) |
|
941 | 953 | if nf in dmap and (matchalways or matchfn(nf)): |
|
942 | 954 | results[nf] = None |
|
943 | 955 | elif kind == regkind or kind == lnkkind: |
|
944 | 956 | if nf in dmap: |
|
945 | 957 | if matchalways or matchfn(nf): |
|
946 | 958 | results[nf] = st |
|
947 | 959 | elif ((matchalways or matchfn(nf)) |
|
948 | 960 | and not ignore(nf)): |
|
949 | 961 | # unknown file -- normalize if necessary |
|
950 | 962 | if not alreadynormed: |
|
951 | 963 | nf = normalize(nf, False, True) |
|
952 | 964 | results[nf] = st |
|
953 | 965 | elif nf in dmap and (matchalways or matchfn(nf)): |
|
954 | 966 | results[nf] = None |
|
955 | 967 | |
|
956 | 968 | for nd, d in work: |
|
957 | 969 |             # alreadynormed means that traverse() doesn't have to do any |
|
958 | 970 | # expensive directory normalization |
|
959 | 971 | alreadynormed = not normalize or nd == d |
|
960 | 972 | traverse([d], alreadynormed) |
|
961 | 973 | |
|
962 | 974 | for s in subrepos: |
|
963 | 975 | del results[s] |
|
964 | 976 | del results['.hg'] |
|
965 | 977 | |
|
966 | 978 | # step 3: visit remaining files from dmap |
|
967 | 979 | if not skipstep3 and not exact: |
|
968 | 980 | # If a dmap file is not in results yet, it was either |
|
969 | 981 |             # a) not matching matchfn, b) ignored, c) missing, or d) under a |
|
970 | 982 | # symlink directory. |
|
971 | 983 | if not results and matchalways: |
|
972 | 984 | visit = [f for f in dmap] |
|
973 | 985 | else: |
|
974 | 986 | visit = [f for f in dmap if f not in results and matchfn(f)] |
|
975 | 987 | visit.sort() |
|
976 | 988 | |
|
977 | 989 | if unknown: |
|
978 | 990 | # unknown == True means we walked all dirs under the roots |
|
979 | 991 |                 # that weren't ignored, and everything that matched was stat'ed |
|
980 | 992 | # and is already in results. |
|
981 | 993 | # The rest must thus be ignored or under a symlink. |
|
982 | 994 | audit_path = pathutil.pathauditor(self._root, cached=True) |
|
983 | 995 | |
|
984 | 996 | for nf in iter(visit): |
|
985 | 997 | # If a stat for the same file was already added with a |
|
986 | 998 | # different case, don't add one for this, since that would |
|
987 | 999 | # make it appear as if the file exists under both names |
|
988 | 1000 | # on disk. |
|
989 | 1001 | if (normalizefile and |
|
990 | 1002 | normalizefile(nf, True, True) in results): |
|
991 | 1003 | results[nf] = None |
|
992 | 1004 | # Report ignored items in the dmap as long as they are not |
|
993 | 1005 | # under a symlink directory. |
|
994 | 1006 | elif audit_path.check(nf): |
|
995 | 1007 | try: |
|
996 | 1008 | results[nf] = lstat(join(nf)) |
|
997 | 1009 | # file was just ignored, no links, and exists |
|
998 | 1010 | except OSError: |
|
999 | 1011 | # file doesn't exist |
|
1000 | 1012 | results[nf] = None |
|
1001 | 1013 | else: |
|
1002 | 1014 | # It's either missing or under a symlink directory |
|
1003 | 1015 | # which we in this case report as missing |
|
1004 | 1016 | results[nf] = None |
|
1005 | 1017 | else: |
|
1006 | 1018 | # We may not have walked the full directory tree above, |
|
1007 | 1019 | # so stat and check everything we missed. |
|
1008 | 1020 | iv = iter(visit) |
|
1009 | 1021 | for st in util.statfiles([join(i) for i in visit]): |
|
1010 | 1022 | results[next(iv)] = st |
|
1011 | 1023 | return results |
|
1012 | 1024 | |
|
1013 | 1025 | def status(self, match, subrepos, ignored, clean, unknown): |
|
1014 | 1026 | '''Determine the status of the working copy relative to the |
|
1015 | 1027 | dirstate and return a pair of (unsure, status), where status is of type |
|
1016 | 1028 | scmutil.status and: |
|
1017 | 1029 | |
|
1018 | 1030 | unsure: |
|
1019 | 1031 | files that might have been modified since the dirstate was |
|
1020 | 1032 | written, but need to be read to be sure (size is the same |
|
1021 | 1033 | but mtime differs) |
|
1022 | 1034 | status.modified: |
|
1023 | 1035 | files that have definitely been modified since the dirstate |
|
1024 | 1036 | was written (different size or mode) |
|
1025 | 1037 | status.clean: |
|
1026 | 1038 | files that have definitely not been modified since the |
|
1027 | 1039 | dirstate was written |
|
1028 | 1040 | ''' |
|
1029 | 1041 | listignored, listclean, listunknown = ignored, clean, unknown |
|
1030 | 1042 | lookup, modified, added, unknown, ignored = [], [], [], [], [] |
|
1031 | 1043 | removed, deleted, clean = [], [], [] |
|
1032 | 1044 | |
|
1033 | 1045 | dmap = self._map |
|
1034 | 1046 | dmap.preload() |
|
1035 | 1047 | dcontains = dmap.__contains__ |
|
1036 | 1048 | dget = dmap.__getitem__ |
|
1037 | 1049 | ladd = lookup.append # aka "unsure" |
|
1038 | 1050 | madd = modified.append |
|
1039 | 1051 | aadd = added.append |
|
1040 | 1052 | uadd = unknown.append |
|
1041 | 1053 | iadd = ignored.append |
|
1042 | 1054 | radd = removed.append |
|
1043 | 1055 | dadd = deleted.append |
|
1044 | 1056 | cadd = clean.append |
|
1045 | 1057 | mexact = match.exact |
|
1046 | 1058 | dirignore = self._dirignore |
|
1047 | 1059 | checkexec = self._checkexec |
|
1048 | 1060 | copymap = self._map.copymap |
|
1049 | 1061 | lastnormaltime = self._lastnormaltime |
|
1050 | 1062 | |
|
1051 | 1063 | # We need to do full walks when either |
|
1052 | 1064 | # - we're listing all clean files, or |
|
1053 | 1065 | # - match.traversedir does something, because match.traversedir should |
|
1054 | 1066 | # be called for every dir in the working dir |
|
1055 | 1067 | full = listclean or match.traversedir is not None |
|
1056 | 1068 | for fn, st in self.walk(match, subrepos, listunknown, listignored, |
|
1057 | 1069 | full=full).iteritems(): |
|
1058 | 1070 | if not dcontains(fn): |
|
1059 | 1071 | if (listignored or mexact(fn)) and dirignore(fn): |
|
1060 | 1072 | if listignored: |
|
1061 | 1073 | iadd(fn) |
|
1062 | 1074 | else: |
|
1063 | 1075 | uadd(fn) |
|
1064 | 1076 | continue |
|
1065 | 1077 | |
|
1066 | 1078 | # This is equivalent to 'state, mode, size, time = dmap[fn]' but not |
|
1067 | 1079 | # written like that for performance reasons. dmap[fn] is not a |
|
1068 | 1080 | # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE |
|
1069 | 1081 | # opcode has fast paths when the value to be unpacked is a tuple or |
|
1070 | 1082 | # a list, but falls back to creating a full-fledged iterator in |
|
1071 | 1083 | # general. That is much slower than simply accessing and storing the |
|
1072 | 1084 | # tuple members one by one. |
|
1073 | 1085 | t = dget(fn) |
|
1074 | 1086 | state = t[0] |
|
1075 | 1087 | mode = t[1] |
|
1076 | 1088 | size = t[2] |
|
1077 | 1089 | time = t[3] |
|
1078 | 1090 | |
|
1079 | 1091 | if not st and state in "nma": |
|
1080 | 1092 | dadd(fn) |
|
1081 | 1093 | elif state == 'n': |
|
1082 | 1094 | if (size >= 0 and |
|
1083 | 1095 | ((size != st.st_size and size != st.st_size & _rangemask) |
|
1084 | 1096 | or ((mode ^ st.st_mode) & 0o100 and checkexec)) |
|
1085 | 1097 | or size == -2 # other parent |
|
1086 | 1098 | or fn in copymap): |
|
1087 | 1099 | madd(fn) |
|
1088 | 1100 | elif (time != st[stat.ST_MTIME] |
|
1089 | 1101 | and time != st[stat.ST_MTIME] & _rangemask): |
|
1090 | 1102 | ladd(fn) |
|
1091 | 1103 | elif st[stat.ST_MTIME] == lastnormaltime: |
|
1092 | 1104 | # fn may have just been marked as normal and it may have |
|
1093 | 1105 | # changed in the same second without changing its size. |
|
1094 | 1106 | # This can happen if we quickly do multiple commits. |
|
1095 | 1107 | # Force lookup, so we don't miss such a racy file change. |
|
1096 | 1108 | ladd(fn) |
|
1097 | 1109 | elif listclean: |
|
1098 | 1110 | cadd(fn) |
|
1099 | 1111 | elif state == 'm': |
|
1100 | 1112 | madd(fn) |
|
1101 | 1113 | elif state == 'a': |
|
1102 | 1114 | aadd(fn) |
|
1103 | 1115 | elif state == 'r': |
|
1104 | 1116 | radd(fn) |
|
1105 | 1117 | |
|
1106 | 1118 | return (lookup, scmutil.status(modified, added, removed, deleted, |
|
1107 | 1119 | unknown, ignored, clean)) |
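
The docstring's three-way split between "modified", "unsure", and "clean" comes down to comparing the stored (state, mode, size, mtime) entry with fresh stat data. A simplified, standalone sketch of that decision (deliberately omitting the exec-bit check, the copy map, `_rangemask` truncation, and the lastnormaltime race handled above):

```python
# Simplified classification of a single dirstate entry; all names are
# hypothetical and several real-world checks are intentionally left out.
def classify(entry, st_size, st_mtime):
    state, mode, size, mtime = entry
    if state != 'n':
        return 'not normal'  # handled by the 'm'/'a'/'r' branches above
    if size != st_size:
        return 'modified'    # definitely changed: size differs
    if mtime != st_mtime:
        return 'unsure'      # same size, different mtime: must read it
    return 'clean'

print(classify(('n', 0o644, 12, 100), 12, 101))  # -> 'unsure'
print(classify(('n', 0o644, 12, 100), 15, 100))  # -> 'modified'
```
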
|
1108 | 1120 | |
|
1109 | 1121 | def matches(self, match): |
|
1110 | 1122 | ''' |
|
1111 | 1123 | return files in the dirstate (in whatever state) filtered by match |
|
1112 | 1124 | ''' |
|
1113 | 1125 | dmap = self._map |
|
1114 | 1126 | if match.always(): |
|
1115 | 1127 | return dmap.keys() |
|
1116 | 1128 | files = match.files() |
|
1117 | 1129 | if match.isexact(): |
|
1118 | 1130 | # fast path -- filter the other way around, since typically files is |
|
1119 | 1131 | # much smaller than dmap |
|
1120 | 1132 | return [f for f in files if f in dmap] |
|
1121 | 1133 | if match.prefix() and all(fn in dmap for fn in files): |
|
1122 | 1134 | # fast path -- all the values are known to be files, so just return |
|
1123 | 1135 | # that |
|
1124 | 1136 | return list(files) |
|
1125 | 1137 | return [f for f in dmap if match(f)] |
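
A tiny illustration of the exact-match fast path above: when the requested file list is small, it is cheaper to filter it against the (typically much larger) dirstate map than to scan every tracked file:

```python
# Hypothetical data; dmap would hold every tracked file in practice.
dmap = {'a.txt': None, 'b.txt': None, 'sub/c.txt': None}
wanted = ['b.txt', 'missing.txt']
print([f for f in wanted if f in dmap])  # -> ['b.txt']
```
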
|
1126 | 1138 | |
|
1127 | 1139 | def _actualfilename(self, tr): |
|
1128 | 1140 | if tr: |
|
1129 | 1141 | return self._pendingfilename |
|
1130 | 1142 | else: |
|
1131 | 1143 | return self._filename |
|
1132 | 1144 | |
|
1133 | 1145 | def savebackup(self, tr, backupname): |
|
1134 | 1146 | '''Save current dirstate into backup file''' |
|
1135 | 1147 | filename = self._actualfilename(tr) |
|
1136 | 1148 | assert backupname != filename |
|
1137 | 1149 | |
|
1138 | 1150 |         # use '_writedirstate' instead of 'write' to make sure changes are |

1139 | 1151 |         # written out, because the latter skips writing while a transaction is |

1140 | 1152 |         # running. The output file is used to create a backup of the dirstate. |
|
1141 | 1153 | if self._dirty or not self._opener.exists(filename): |
|
1142 | 1154 | self._writedirstate(self._opener(filename, "w", atomictemp=True, |
|
1143 | 1155 | checkambig=True)) |
|
1144 | 1156 | |
|
1145 | 1157 | if tr: |
|
1146 | 1158 | # ensure that subsequent tr.writepending returns True for |
|
1147 | 1159 | # changes written out above, even if dirstate is never |
|
1148 | 1160 | # changed after this |
|
1149 | 1161 | tr.addfilegenerator('dirstate', (self._filename,), |
|
1150 | 1162 | self._writedirstate, location='plain') |
|
1151 | 1163 | |
|
1152 | 1164 |             # ensure that the pending file written above is unlinked on |
|
1153 | 1165 | # failure, even if tr.writepending isn't invoked until the |
|
1154 | 1166 | # end of this transaction |
|
1155 | 1167 | tr.registertmp(filename, location='plain') |
|
1156 | 1168 | |
|
1157 | 1169 | self._opener.tryunlink(backupname) |
|
1158 | 1170 | # hardlink backup is okay because _writedirstate is always called |
|
1159 | 1171 | # with an "atomictemp=True" file. |
|
1160 | 1172 | util.copyfile(self._opener.join(filename), |
|
1161 | 1173 | self._opener.join(backupname), hardlink=True) |
|
1162 | 1174 | |
|
1163 | 1175 | def restorebackup(self, tr, backupname): |
|
1164 | 1176 |         '''Restore the dirstate from a backup file''' |
|
1165 | 1177 | # this "invalidate()" prevents "wlock.release()" from writing |
|
1166 | 1178 |         # dirstate changes out after restoring from the backup file |
|
1167 | 1179 | self.invalidate() |
|
1168 | 1180 | filename = self._actualfilename(tr) |
|
1169 | 1181 | o = self._opener |
|
1170 | 1182 | if util.samefile(o.join(backupname), o.join(filename)): |
|
1171 | 1183 | o.unlink(backupname) |
|
1172 | 1184 | else: |
|
1173 | 1185 | o.rename(backupname, filename, checkambig=True) |
|
1174 | 1186 | |
|
1175 | 1187 | def clearbackup(self, tr, backupname): |
|
1176 | 1188 | '''Clear backup file''' |
|
1177 | 1189 | self._opener.unlink(backupname) |
|
1178 | 1190 | |
|
1179 | 1191 | class dirstatemap(object): |
|
1180 | 1192 | """Map encapsulating the dirstate's contents. |
|
1181 | 1193 | |
|
1182 | 1194 | The dirstate contains the following state: |
|
1183 | 1195 | |
|
1184 | 1196 | - `identity` is the identity of the dirstate file, which can be used to |
|
1185 | 1197 | detect when changes have occurred to the dirstate file. |
|
1186 | 1198 | |
|
1187 | 1199 | - `parents` is a pair containing the parents of the working copy. The |
|
1188 | 1200 | parents are updated by calling `setparents`. |
|
1189 | 1201 | |
|
1190 | 1202 | - the state map maps filenames to tuples of (state, mode, size, mtime), |
|
1191 | 1203 | where state is a single character representing 'normal', 'added', |
|
1192 | 1204 | 'removed', or 'merged'. It is read by treating the dirstate as a |
|
1193 | 1205 | dict. File state is updated by calling the `addfile`, `removefile` and |
|
1194 | 1206 | `dropfile` methods. |
|
1195 | 1207 | |
|
1196 | 1208 | - `copymap` maps destination filenames to their source filename. |
|
1197 | 1209 | |
|
1198 | 1210 | The dirstate also provides the following views onto the state: |
|
1199 | 1211 | |
|
1200 | 1212 | - `nonnormalset` is a set of the filenames that have state other |
|
1201 | 1213 | than 'normal', or are normal but have an mtime of -1 ('normallookup'). |
|
1202 | 1214 | |
|
1203 | 1215 | - `otherparentset` is a set of the filenames that are marked as coming |
|
1204 | 1216 | from the second parent when the dirstate is currently being merged. |
|
1205 | 1217 | |
|
1206 | 1218 | - `filefoldmap` is a dict mapping normalized filenames to the denormalized |
|
1207 | 1219 | form that they appear as in the dirstate. |
|
1208 | 1220 | |
|
1209 | 1221 | - `dirfoldmap` is a dict mapping normalized directory names to the |
|
1210 | 1222 | denormalized form that they appear as in the dirstate. |
|
1211 | 1223 | """ |
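
To make the description above concrete, here is an invented snapshot of the structures a dirstatemap holds; the tuples follow the (state, mode, size, mtime) layout and every value is hypothetical:

```python
# Invented example data mirroring the structures documented above.
statemap = {
    'kept.txt':  ('n', 0o644, 12, 1500000000),  # normal (clean candidate)
    'added.txt': ('a', 0o644, -1, -1),          # added in the working copy
    'gone.txt':  ('r', 0, 0, 0),                # marked as removed
}
copymap = {'copy.txt': 'original.txt'}          # destination -> source
nonnormalset = {'added.txt', 'gone.txt'}        # everything not clean 'n'
filefoldmap = {'kept.txt': 'kept.txt'}          # normcased -> stored form
```
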
|
1212 | 1224 | |
|
1213 | 1225 | def __init__(self, ui, opener, root): |
|
1214 | 1226 | self._ui = ui |
|
1215 | 1227 | self._opener = opener |
|
1216 | 1228 | self._root = root |
|
1217 | 1229 | self._filename = 'dirstate' |
|
1218 | 1230 | |
|
1219 | 1231 | self._parents = None |
|
1220 | 1232 | self._dirtyparents = False |
|
1221 | 1233 | |
|
1222 | 1234 | # for consistent view between _pl() and _read() invocations |
|
1223 | 1235 | self._pendingmode = None |
|
1224 | 1236 | |
|
1225 | 1237 | @propertycache |
|
1226 | 1238 | def _map(self): |
|
1227 | 1239 | self._map = {} |
|
1228 | 1240 | self.read() |
|
1229 | 1241 | return self._map |
|
1230 | 1242 | |
|
1231 | 1243 | @propertycache |
|
1232 | 1244 | def copymap(self): |
|
1233 | 1245 | self.copymap = {} |
|
1234 | 1246 | self._map |
|
1235 | 1247 | return self.copymap |
|
1236 | 1248 | |
|
1237 | 1249 | def clear(self): |
|
1238 | 1250 | self._map.clear() |
|
1239 | 1251 | self.copymap.clear() |
|
1240 | 1252 | self.setparents(nullid, nullid) |
|
1241 | 1253 | util.clearcachedproperty(self, "_dirs") |
|
1242 | 1254 | util.clearcachedproperty(self, "_alldirs") |
|
1243 | 1255 | util.clearcachedproperty(self, "filefoldmap") |
|
1244 | 1256 | util.clearcachedproperty(self, "dirfoldmap") |
|
1245 | 1257 | util.clearcachedproperty(self, "nonnormalset") |
|
1246 | 1258 | util.clearcachedproperty(self, "otherparentset") |
|
1247 | 1259 | |
|
1248 | 1260 | def items(self): |
|
1249 | 1261 | return self._map.iteritems() |
|
1250 | 1262 | |
|
1251 | 1263 | # forward for python2,3 compat |
|
1252 | 1264 | iteritems = items |
|
1253 | 1265 | |
|
1254 | 1266 | def __len__(self): |
|
1255 | 1267 | return len(self._map) |
|
1256 | 1268 | |
|
1257 | 1269 | def __iter__(self): |
|
1258 | 1270 | return iter(self._map) |
|
1259 | 1271 | |
|
1260 | 1272 | def get(self, key, default=None): |
|
1261 | 1273 | return self._map.get(key, default) |
|
1262 | 1274 | |
|
1263 | 1275 | def __contains__(self, key): |
|
1264 | 1276 | return key in self._map |
|
1265 | 1277 | |
|
1266 | 1278 | def __getitem__(self, key): |
|
1267 | 1279 | return self._map[key] |
|
1268 | 1280 | |
|
1269 | 1281 | def keys(self): |
|
1270 | 1282 | return self._map.keys() |
|
1271 | 1283 | |
|
1272 | 1284 | def preload(self): |
|
1273 | 1285 | """Loads the underlying data, if it's not already loaded""" |
|
1274 | 1286 | self._map |
|
1275 | 1287 | |
|
1276 | 1288 | def addfile(self, f, oldstate, state, mode, size, mtime): |
|
1277 | 1289 | """Add a tracked file to the dirstate.""" |
|
1278 | 1290 | if oldstate in "?r" and r"_dirs" in self.__dict__: |
|
1279 | 1291 | self._dirs.addpath(f) |
|
1280 | 1292 | if oldstate == "?" and r"_alldirs" in self.__dict__: |
|
1281 | 1293 | self._alldirs.addpath(f) |
|
1282 | 1294 | self._map[f] = dirstatetuple(state, mode, size, mtime) |
|
1283 | 1295 | if state != 'n' or mtime == -1: |
|
1284 | 1296 | self.nonnormalset.add(f) |
|
1285 | 1297 | if size == -2: |
|
1286 | 1298 | self.otherparentset.add(f) |
|
1287 | 1299 | |
|
1288 | 1300 | def removefile(self, f, oldstate, size): |
|
1289 | 1301 | """ |
|
1290 | 1302 | Mark a file as removed in the dirstate. |
|
1291 | 1303 | |
|
1292 | 1304 | The `size` parameter is used to store sentinel values that indicate |
|
1293 | 1305 | the file's previous state. In the future, we should refactor this |
|
1294 | 1306 | to be more explicit about what that state is. |
|
1295 | 1307 | """ |
|
1296 | 1308 | if oldstate not in "?r" and r"_dirs" in self.__dict__: |
|
1297 | 1309 | self._dirs.delpath(f) |
|
1298 | 1310 | if oldstate == "?" and r"_alldirs" in self.__dict__: |
|
1299 | 1311 | self._alldirs.addpath(f) |
|
1300 | 1312 | if r"filefoldmap" in self.__dict__: |
|
1301 | 1313 | normed = util.normcase(f) |
|
1302 | 1314 | self.filefoldmap.pop(normed, None) |
|
1303 | 1315 | self._map[f] = dirstatetuple('r', 0, size, 0) |
|
1304 | 1316 | self.nonnormalset.add(f) |
|
1305 | 1317 | |
|
1306 | 1318 | def dropfile(self, f, oldstate): |
|
1307 | 1319 | """ |
|
1308 | 1320 | Remove a file from the dirstate. Returns True if the file was |
|
1309 | 1321 | previously recorded. |
|
1310 | 1322 | """ |
|
1311 | 1323 | exists = self._map.pop(f, None) is not None |
|
1312 | 1324 | if exists: |
|
1313 | 1325 | if oldstate != "r" and r"_dirs" in self.__dict__: |
|
1314 | 1326 | self._dirs.delpath(f) |
|
1315 | 1327 | if r"_alldirs" in self.__dict__: |
|
1316 | 1328 | self._alldirs.delpath(f) |
|
1317 | 1329 | if r"filefoldmap" in self.__dict__: |
|
1318 | 1330 | normed = util.normcase(f) |
|
1319 | 1331 | self.filefoldmap.pop(normed, None) |
|
1320 | 1332 | self.nonnormalset.discard(f) |
|
1321 | 1333 | return exists |
|
1322 | 1334 | |
|
1323 | 1335 | def clearambiguoustimes(self, files, now): |
|
1324 | 1336 | for f in files: |
|
1325 | 1337 | e = self.get(f) |
|
1326 | 1338 | if e is not None and e[0] == 'n' and e[3] == now: |
|
1327 | 1339 | self._map[f] = dirstatetuple(e[0], e[1], e[2], -1) |
|
1328 | 1340 | self.nonnormalset.add(f) |
|
1329 | 1341 | |
|
1330 | 1342 | def nonnormalentries(self): |
|
1331 | 1343 | '''Compute the nonnormal dirstate entries from the dmap''' |
|
1332 | 1344 | try: |
|
1333 | 1345 | return parsers.nonnormalotherparententries(self._map) |
|
1334 | 1346 | except AttributeError: |
|
1335 | 1347 | nonnorm = set() |
|
1336 | 1348 | otherparent = set() |
|
1337 | 1349 | for fname, e in self._map.iteritems(): |
|
1338 | 1350 | if e[0] != 'n' or e[3] == -1: |
|
1339 | 1351 | nonnorm.add(fname) |
|
1340 | 1352 | if e[0] == 'n' and e[2] == -2: |
|
1341 | 1353 | otherparent.add(fname) |
|
1342 | 1354 | return nonnorm, otherparent |
|
1343 | 1355 | |
|
1344 | 1356 | @propertycache |
|
1345 | 1357 | def filefoldmap(self): |
|
1346 | 1358 | """Returns a dictionary mapping normalized case paths to their |
|
1347 | 1359 | non-normalized versions. |
|
1348 | 1360 | """ |
|
1349 | 1361 | try: |
|
1350 | 1362 | makefilefoldmap = parsers.make_file_foldmap |
|
1351 | 1363 | except AttributeError: |
|
1352 | 1364 | pass |
|
1353 | 1365 | else: |
|
1354 | 1366 | return makefilefoldmap(self._map, util.normcasespec, |
|
1355 | 1367 | util.normcasefallback) |
|
1356 | 1368 | |
|
1357 | 1369 | f = {} |
|
1358 | 1370 | normcase = util.normcase |
|
1359 | 1371 | for name, s in self._map.iteritems(): |
|
1360 | 1372 | if s[0] != 'r': |
|
1361 | 1373 | f[normcase(name)] = name |
|
1362 | 1374 | f['.'] = '.' # prevents useless util.fspath() invocation |
|
1363 | 1375 | return f |
|
1364 | 1376 | |
|
1365 | 1377 | def hastrackeddir(self, d): |
|
1366 | 1378 | """ |
|
1367 | 1379 | Returns True if the dirstate contains a tracked (not removed) file |
|
1368 | 1380 | in this directory. |
|
1369 | 1381 | """ |
|
1370 | 1382 | return d in self._dirs |
|
1371 | 1383 | |
|
1372 | 1384 | def hasdir(self, d): |
|
1373 | 1385 | """ |
|
1374 | 1386 | Returns True if the dirstate contains a file (tracked or removed) |
|
1375 | 1387 | in this directory. |
|
1376 | 1388 | """ |
|
1377 | 1389 | return d in self._alldirs |
|
1378 | 1390 | |
|
1379 | 1391 | @propertycache |
|
1380 | 1392 | def _dirs(self): |
|
1381 | 1393 | return util.dirs(self._map, 'r') |
|
1382 | 1394 | |
|
1383 | 1395 | @propertycache |
|
1384 | 1396 | def _alldirs(self): |
|
1385 | 1397 | return util.dirs(self._map) |
|
1386 | 1398 | |
|
1387 | 1399 | def _opendirstatefile(self): |
|
1388 | 1400 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) |
|
1389 | 1401 | if self._pendingmode is not None and self._pendingmode != mode: |
|
1390 | 1402 | fp.close() |
|
1391 | 1403 | raise error.Abort(_('working directory state may be ' |
|
1392 | 1404 |                                 'changed in parallel')) |
|
1393 | 1405 | self._pendingmode = mode |
|
1394 | 1406 | return fp |
|
1395 | 1407 | |
|
1396 | 1408 | def parents(self): |
|
1397 | 1409 | if not self._parents: |
|
1398 | 1410 | try: |
|
1399 | 1411 | fp = self._opendirstatefile() |
|
1400 | 1412 | st = fp.read(40) |
|
1401 | 1413 | fp.close() |
|
1402 | 1414 | except IOError as err: |
|
1403 | 1415 | if err.errno != errno.ENOENT: |
|
1404 | 1416 | raise |
|
1405 | 1417 | # File doesn't exist, so the current state is empty |
|
1406 | 1418 | st = '' |
|
1407 | 1419 | |
|
1408 | 1420 | l = len(st) |
|
1409 | 1421 | if l == 40: |
|
1410 | 1422 | self._parents = (st[:20], st[20:40]) |
|
1411 | 1423 | elif l == 0: |
|
1412 | 1424 | self._parents = (nullid, nullid) |
|
1413 | 1425 | else: |
|
1414 | 1426 | raise error.Abort(_('working directory state appears ' |
|
1415 | 1427 | 'damaged!')) |
|
1416 | 1428 | |
|
1417 | 1429 | return self._parents |
|
1418 | 1430 | |
|
1419 | 1431 | def setparents(self, p1, p2): |
|
1420 | 1432 | self._parents = (p1, p2) |
|
1421 | 1433 | self._dirtyparents = True |
|
1422 | 1434 | |
|
1423 | 1435 | def read(self): |
|
1424 | 1436 | # ignore HG_PENDING because identity is used only for writing |
|
1425 | 1437 | self.identity = util.filestat.frompath( |
|
1426 | 1438 | self._opener.join(self._filename)) |
|
1427 | 1439 | |
|
1428 | 1440 | try: |
|
1429 | 1441 | fp = self._opendirstatefile() |
|
1430 | 1442 | try: |
|
1431 | 1443 | st = fp.read() |
|
1432 | 1444 | finally: |
|
1433 | 1445 | fp.close() |
|
1434 | 1446 | except IOError as err: |
|
1435 | 1447 | if err.errno != errno.ENOENT: |
|
1436 | 1448 | raise |
|
1437 | 1449 | return |
|
1438 | 1450 | if not st: |
|
1439 | 1451 | return |
|
1440 | 1452 | |
|
1441 | 1453 | if util.safehasattr(parsers, 'dict_new_presized'): |
|
1442 | 1454 | # Make an estimate of the number of files in the dirstate based on |
|
1443 | 1455 | # its size. From a linear regression on a set of real-world repos, |
|
1444 | 1456 |             # all over 10,000 files, the size of a dirstate entry is 89 |

1445 | 1457 |             # bytes. The cost of resizing is significantly higher than the cost |

1446 | 1458 |             # of filling in a larger presized dict, so subtract 20% from the |

1447 | 1459 |             # size (89 * 0.8 is roughly 71, hence the `// 71` below). |
|
1448 | 1460 | # |
|
1449 | 1461 | # This heuristic is imperfect in many ways, so in a future dirstate |
|
1450 | 1462 | # format update it makes sense to just record the number of entries |
|
1451 | 1463 | # on write. |
|
1452 | 1464 | self._map = parsers.dict_new_presized(len(st) // 71) |
|
1453 | 1465 | |
|
1454 | 1466 | # Python's garbage collector triggers a GC each time a certain number |
|
1455 | 1467 | # of container objects (the number being defined by |
|
1456 | 1468 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple |
|
1457 | 1469 | # for each file in the dirstate. The C version then immediately marks |
|
1458 | 1470 | # them as not to be tracked by the collector. However, this has no |
|
1459 | 1471 | # effect on when GCs are triggered, only on what objects the GC looks |
|
1460 | 1472 | # into. This means that O(number of files) GCs are unavoidable. |
|
1461 | 1473 | # Depending on when in the process's lifetime the dirstate is parsed, |
|
1462 | 1474 | # this can get very expensive. As a workaround, disable GC while |
|
1463 | 1475 | # parsing the dirstate. |
|
1464 | 1476 | # |
|
1465 | 1477 | # (we cannot decorate the function directly since it is in a C module) |
|
1466 | 1478 | parse_dirstate = util.nogc(dirstatemod.parse_dirstate) |
|
1467 | 1479 | p = parse_dirstate(self._map, self.copymap, st) |
|
1468 | 1480 | if not self._dirtyparents: |
|
1469 | 1481 | self.setparents(*p) |
|
1470 | 1482 | |
|
1471 | 1483 | # Avoid excess attribute lookups by fast pathing certain checks |
|
1472 | 1484 | self.__contains__ = self._map.__contains__ |
|
1473 | 1485 | self.__getitem__ = self._map.__getitem__ |
|
1474 | 1486 | self.get = self._map.get |
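
The GC comment above generalizes to any parse that allocates many container objects. A minimal sketch of the same idea with the standard `gc` module (roughly what a helper like `util.nogc` boils down to):

```python
import gc

def run_without_gc(fn, *args):
    # disable the cyclic collector around an allocation-heavy call, then
    # restore its previous state; sketch of the workaround described above
    was_enabled = gc.isenabled()
    gc.disable()
    try:
        return fn(*args)
    finally:
        if was_enabled:
            gc.enable()

print(run_without_gc(sum, range(10)))  # -> 45
```
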
|
1475 | 1487 | |
|
1476 | 1488 | def write(self, st, now): |
|
1477 | 1489 | st.write(dirstatemod.pack_dirstate(self._map, self.copymap, |
|
1478 | 1490 | self.parents(), now)) |
|
1479 | 1491 | st.close() |
|
1480 | 1492 | self._dirtyparents = False |
|
1481 | 1493 | self.nonnormalset, self.otherparentset = self.nonnormalentries() |
|
1482 | 1494 | |
|
1483 | 1495 | @propertycache |
|
1484 | 1496 | def nonnormalset(self): |
|
1485 | 1497 | nonnorm, otherparents = self.nonnormalentries() |
|
1486 | 1498 | self.otherparentset = otherparents |
|
1487 | 1499 | return nonnorm |
|
1488 | 1500 | |
|
1489 | 1501 | @propertycache |
|
1490 | 1502 | def otherparentset(self): |
|
1491 | 1503 | nonnorm, otherparents = self.nonnormalentries() |
|
1492 | 1504 | self.nonnormalset = nonnorm |
|
1493 | 1505 | return otherparents |
|
1494 | 1506 | |
|
1495 | 1507 | @propertycache |
|
1496 | 1508 | def identity(self): |
|
1497 | 1509 | self._map |
|
1498 | 1510 | return self.identity |
|
1499 | 1511 | |
|
1500 | 1512 | @propertycache |
|
1501 | 1513 | def dirfoldmap(self): |
|
1502 | 1514 | f = {} |
|
1503 | 1515 | normcase = util.normcase |
|
1504 | 1516 | for name in self._dirs: |
|
1505 | 1517 | f[normcase(name)] = name |
|
1506 | 1518 | return f |
@@ -1,2302 +1,2333 | |||
|
1 | 1 | # merge.py - directory-level update/merge handling for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import hashlib |
|
12 | 12 | import shutil |
|
13 | import stat | |
|
13 | 14 | import struct |
|
14 | 15 | |
|
15 | 16 | from .i18n import _ |
|
16 | 17 | from .node import ( |
|
17 | 18 | addednodeid, |
|
18 | 19 | bin, |
|
19 | 20 | hex, |
|
20 | 21 | modifiednodeid, |
|
21 | 22 | nullhex, |
|
22 | 23 | nullid, |
|
23 | 24 | nullrev, |
|
24 | 25 | ) |
|
25 | 26 | from .thirdparty import ( |
|
26 | 27 | attr, |
|
27 | 28 | ) |
|
28 | 29 | from . import ( |
|
29 | 30 | copies, |
|
30 | 31 | encoding, |
|
31 | 32 | error, |
|
32 | 33 | filemerge, |
|
33 | 34 | match as matchmod, |
|
34 | 35 | obsutil, |
|
35 | 36 | pycompat, |
|
36 | 37 | scmutil, |
|
37 | 38 | subrepoutil, |
|
38 | 39 | util, |
|
39 | 40 | worker, |
|
40 | 41 | ) |
|
41 | 42 | |
|
42 | 43 | _pack = struct.pack |
|
43 | 44 | _unpack = struct.unpack |
|
44 | 45 | |
|
45 | 46 | def _droponode(data): |
|
46 | 47 | # used for compatibility for v1 |
|
47 | 48 | bits = data.split('\0') |
|
48 | 49 | bits = bits[:-2] + bits[-1:] |
|
49 | 50 | return '\0'.join(bits) |
|
50 | 51 | |
|
51 | 52 | # Merge state record types. See ``mergestate`` docs for more. |
|
52 | 53 | RECORD_LOCAL = b'L' |
|
53 | 54 | RECORD_OTHER = b'O' |
|
54 | 55 | RECORD_MERGED = b'F' |
|
55 | 56 | RECORD_CHANGEDELETE_CONFLICT = b'C' |
|
56 | 57 | RECORD_MERGE_DRIVER_MERGE = b'D' |
|
57 | 58 | RECORD_PATH_CONFLICT = b'P' |
|
58 | 59 | RECORD_MERGE_DRIVER_STATE = b'm' |
|
59 | 60 | RECORD_FILE_VALUES = b'f' |
|
60 | 61 | RECORD_LABELS = b'l' |
|
61 | 62 | RECORD_OVERRIDE = b't' |
|
62 | 63 | RECORD_UNSUPPORTED_MANDATORY = b'X' |
|
63 | 64 | RECORD_UNSUPPORTED_ADVISORY = b'x' |
|
64 | 65 | |
|
65 | 66 | MERGE_DRIVER_STATE_UNMARKED = b'u' |
|
66 | 67 | MERGE_DRIVER_STATE_MARKED = b'm' |
|
67 | 68 | MERGE_DRIVER_STATE_SUCCESS = b's' |
|
68 | 69 | |
|
69 | 70 | MERGE_RECORD_UNRESOLVED = b'u' |
|
70 | 71 | MERGE_RECORD_RESOLVED = b'r' |
|
71 | 72 | MERGE_RECORD_UNRESOLVED_PATH = b'pu' |
|
72 | 73 | MERGE_RECORD_RESOLVED_PATH = b'pr' |
|
73 | 74 | MERGE_RECORD_DRIVER_RESOLVED = b'd' |
|
74 | 75 | |
|
75 | 76 | ACTION_FORGET = b'f' |
|
76 | 77 | ACTION_REMOVE = b'r' |
|
77 | 78 | ACTION_ADD = b'a' |
|
78 | 79 | ACTION_GET = b'g' |
|
79 | 80 | ACTION_PATH_CONFLICT = b'p' |
|
80 | 81 | ACTION_PATH_CONFLICT_RESOLVE = b'pr' |
|
81 | 82 | ACTION_ADD_MODIFIED = b'am' |
|
82 | 83 | ACTION_CREATED = b'c' |
|
83 | 84 | ACTION_DELETED_CHANGED = b'dc' |
|
84 | 85 | ACTION_CHANGED_DELETED = b'cd' |
|
85 | 86 | ACTION_MERGE = b'm' |
|
86 | 87 | ACTION_LOCAL_DIR_RENAME_GET = b'dg' |
|
87 | 88 | ACTION_DIR_RENAME_MOVE_LOCAL = b'dm' |
|
88 | 89 | ACTION_KEEP = b'k' |
|
89 | 90 | ACTION_EXEC = b'e' |
|
90 | 91 | ACTION_CREATED_MERGE = b'cm' |
|
91 | 92 | |
|
92 | 93 | class mergestate(object): |
|
93 | 94 | '''track 3-way merge state of individual files |
|
94 | 95 | |
|
95 | 96 | The merge state is stored on disk when needed. Two files are used: one with |
|
96 | 97 | an old format (version 1), and one with a new format (version 2). Version 2 |
|
97 | 98 | stores a superset of the data in version 1, including new kinds of records |
|
98 | 99 | in the future. For more about the new format, see the documentation for |
|
99 | 100 | `_readrecordsv2`. |
|
100 | 101 | |
|
101 | 102 | Each record can contain arbitrary content, and has an associated type. This |
|
102 | 103 | `type` should be a letter. If `type` is uppercase, the record is mandatory: |
|
103 | 104 | versions of Mercurial that don't support it should abort. If `type` is |
|
104 | 105 | lowercase, the record can be safely ignored. |
|
105 | 106 | |
|
106 | 107 | Currently known records: |
|
107 | 108 | |
|
108 | 109 | L: the node of the "local" part of the merge (hexified version) |
|
109 | 110 | O: the node of the "other" part of the merge (hexified version) |
|
110 | 111 | F: a file to be merged entry |
|
111 | 112 | C: a change/delete or delete/change conflict |
|
112 | 113 | D: a file that the external merge driver will merge internally |
|
113 | 114 | (experimental) |
|
114 | 115 | P: a path conflict (file vs directory) |
|
115 | 116 | m: the external merge driver defined for this merge plus its run state |
|
116 | 117 | (experimental) |
|
117 | 118 | f: a (filename, dictionary) tuple of optional values for a given file |
|
118 | 119 | X: unsupported mandatory record type (used in tests) |
|
119 | 120 | x: unsupported advisory record type (used in tests) |
|
120 | 121 | l: the labels for the parts of the merge. |
|
121 | 122 | |
|
122 | 123 | Merge driver run states (experimental): |
|
123 | 124 | u: driver-resolved files unmarked -- needs to be run next time we're about |
|
124 | 125 | to resolve or commit |
|
125 | 126 | m: driver-resolved files marked -- only needs to be run before commit |
|
126 | 127 | s: success/skipped -- does not need to be run any more |
|
127 | 128 | |
|
128 | 129 | Merge record states (stored in self._state, indexed by filename): |
|
129 | 130 | u: unresolved conflict |
|
130 | 131 | r: resolved conflict |
|
131 | 132 | pu: unresolved path conflict (file conflicts with directory) |
|
132 | 133 | pr: resolved path conflict |
|
133 | 134 | d: driver-resolved conflict |
|
134 | 135 | |
|
135 | 136 | The resolve command transitions between 'u' and 'r' for conflicts and |
|
136 | 137 | 'pu' and 'pr' for path conflicts. |
|
137 | 138 | ''' |
|
138 | 139 | statepathv1 = 'merge/state' |
|
139 | 140 | statepathv2 = 'merge/state2' |
|
140 | 141 | |
|
141 | 142 | @staticmethod |
|
142 | 143 | def clean(repo, node=None, other=None, labels=None): |
|
143 | 144 | """Initialize a brand new merge state, removing any existing state on |
|
144 | 145 | disk.""" |
|
145 | 146 | ms = mergestate(repo) |
|
146 | 147 | ms.reset(node, other, labels) |
|
147 | 148 | return ms |
|
148 | 149 | |
|
149 | 150 | @staticmethod |
|
150 | 151 | def read(repo): |
|
151 | 152 | """Initialize the merge state, reading it from disk.""" |
|
152 | 153 | ms = mergestate(repo) |
|
153 | 154 | ms._read() |
|
154 | 155 | return ms |
|
155 | 156 | |
|
156 | 157 | def __init__(self, repo): |
|
157 | 158 | """Initialize the merge state. |
|
158 | 159 | |
|
159 | 160 | Do not use this directly! Instead call read() or clean().""" |
|
160 | 161 | self._repo = repo |
|
161 | 162 | self._dirty = False |
|
162 | 163 | self._labels = None |
|
163 | 164 | |
|
164 | 165 | def reset(self, node=None, other=None, labels=None): |
|
165 | 166 | self._state = {} |
|
166 | 167 | self._stateextras = {} |
|
167 | 168 | self._local = None |
|
168 | 169 | self._other = None |
|
169 | 170 | self._labels = labels |
|
170 | 171 | for var in ('localctx', 'otherctx'): |
|
171 | 172 | if var in vars(self): |
|
172 | 173 | delattr(self, var) |
|
173 | 174 | if node: |
|
174 | 175 | self._local = node |
|
175 | 176 | self._other = other |
|
176 | 177 | self._readmergedriver = None |
|
177 | 178 | if self.mergedriver: |
|
178 | 179 | self._mdstate = MERGE_DRIVER_STATE_SUCCESS |
|
179 | 180 | else: |
|
180 | 181 | self._mdstate = MERGE_DRIVER_STATE_UNMARKED |
|
181 | 182 | shutil.rmtree(self._repo.vfs.join('merge'), True) |
|
182 | 183 | self._results = {} |
|
183 | 184 | self._dirty = False |
|
184 | 185 | |
|
185 | 186 | def _read(self): |
|
186 | 187 | """Analyse each record content to restore a serialized state from disk |
|
187 | 188 | |
|
188 | 189 | This function process "record" entry produced by the de-serialization |
|
189 | 190 | of on disk file. |
|
190 | 191 | """ |
|
191 | 192 | self._state = {} |
|
192 | 193 | self._stateextras = {} |
|
193 | 194 | self._local = None |
|
194 | 195 | self._other = None |
|
195 | 196 | for var in ('localctx', 'otherctx'): |
|
196 | 197 | if var in vars(self): |
|
197 | 198 | delattr(self, var) |
|
198 | 199 | self._readmergedriver = None |
|
199 | 200 | self._mdstate = MERGE_DRIVER_STATE_SUCCESS |
|
200 | 201 | unsupported = set() |
|
201 | 202 | records = self._readrecords() |
|
202 | 203 | for rtype, record in records: |
|
203 | 204 | if rtype == RECORD_LOCAL: |
|
204 | 205 | self._local = bin(record) |
|
205 | 206 | elif rtype == RECORD_OTHER: |
|
206 | 207 | self._other = bin(record) |
|
207 | 208 | elif rtype == RECORD_MERGE_DRIVER_STATE: |
|
208 | 209 | bits = record.split('\0', 1) |
|
209 | 210 | mdstate = bits[1] |
|
210 | 211 | if len(mdstate) != 1 or mdstate not in ( |
|
211 | 212 | MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED, |
|
212 | 213 | MERGE_DRIVER_STATE_SUCCESS): |
|
213 | 214 | # the merge driver should be idempotent, so just rerun it |
|
214 | 215 | mdstate = MERGE_DRIVER_STATE_UNMARKED |
|
215 | 216 | |
|
216 | 217 | self._readmergedriver = bits[0] |
|
217 | 218 | self._mdstate = mdstate |
|
218 | 219 | elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT, |
|
219 | 220 | RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE): |
|
220 | 221 | bits = record.split('\0') |
|
221 | 222 | self._state[bits[0]] = bits[1:] |
|
222 | 223 | elif rtype == RECORD_FILE_VALUES: |
|
223 | 224 | filename, rawextras = record.split('\0', 1) |
|
224 | 225 | extraparts = rawextras.split('\0') |
|
225 | 226 | extras = {} |
|
226 | 227 | i = 0 |
|
227 | 228 | while i < len(extraparts): |
|
228 | 229 | extras[extraparts[i]] = extraparts[i + 1] |
|
229 | 230 | i += 2 |
|
230 | 231 | |
|
231 | 232 | self._stateextras[filename] = extras |
|
232 | 233 | elif rtype == RECORD_LABELS: |
|
233 | 234 | labels = record.split('\0', 2) |
|
234 | 235 | self._labels = [l for l in labels if len(l) > 0] |
|
235 | 236 | elif not rtype.islower(): |
|
236 | 237 | unsupported.add(rtype) |
|
237 | 238 | self._results = {} |
|
238 | 239 | self._dirty = False |
|
239 | 240 | |
|
240 | 241 | if unsupported: |
|
241 | 242 | raise error.UnsupportedMergeRecords(unsupported) |
|
242 | 243 | |
|
243 | 244 | def _readrecords(self): |
|
244 | 245 | """Read merge state from disk and return a list of record (TYPE, data) |
|
245 | 246 | |
|
246 | 247 | We read data from both v1 and v2 files and decide which one to use. |
|
247 | 248 | |
|
248 | 249 | V1 has been used by version prior to 2.9.1 and contains less data than |
|
249 | 250 | v2. We read both versions and check if no data in v2 contradicts |
|
250 | 251 | v1. If there is not contradiction we can safely assume that both v1 |
|
251 | 252 | and v2 were written at the same time and use the extract data in v2. If |
|
252 | 253 | there is contradiction we ignore v2 content as we assume an old version |
|
253 | 254 | of Mercurial has overwritten the mergestate file and left an old v2 |
|
254 | 255 | file around. |
|
255 | 256 | |
|
256 | 257 | returns list of record [(TYPE, data), ...]""" |
|
257 | 258 | v1records = self._readrecordsv1() |
|
258 | 259 | v2records = self._readrecordsv2() |
|
259 | 260 | if self._v1v2match(v1records, v2records): |
|
260 | 261 | return v2records |
|
261 | 262 | else: |
|
262 | 263 | # v1 file is newer than v2 file, use it |
|
263 | 264 | # we have to infer the "other" changeset of the merge |
|
264 | 265 | # we cannot do better than that with v1 of the format |
|
265 | 266 | mctx = self._repo[None].parents()[-1] |
|
266 | 267 | v1records.append((RECORD_OTHER, mctx.hex())) |
|
267 | 268 | # add place holder "other" file node information |
|
268 | 269 | # nobody is using it yet so we do no need to fetch the data |
|
269 | 270 | # if mctx was wrong `mctx[bits[-2]]` may fails. |
|
270 | 271 | for idx, r in enumerate(v1records): |
|
271 | 272 | if r[0] == RECORD_MERGED: |
|
272 | 273 | bits = r[1].split('\0') |
|
273 | 274 | bits.insert(-2, '') |
|
274 | 275 | v1records[idx] = (r[0], '\0'.join(bits)) |
|
275 | 276 | return v1records |
|
276 | 277 | |
|
277 | 278 | def _v1v2match(self, v1records, v2records): |
|
278 | 279 | oldv2 = set() # old format version of v2 record |
|
279 | 280 | for rec in v2records: |
|
280 | 281 | if rec[0] == RECORD_LOCAL: |
|
281 | 282 | oldv2.add(rec) |
|
282 | 283 | elif rec[0] == RECORD_MERGED: |
|
283 | 284 | # drop the onode data (not contained in v1) |
|
284 | 285 | oldv2.add((RECORD_MERGED, _droponode(rec[1]))) |
|
285 | 286 | for rec in v1records: |
|
286 | 287 | if rec not in oldv2: |
|
287 | 288 | return False |
|
288 | 289 | else: |
|
289 | 290 | return True |
|
290 | 291 | |
|
291 | 292 | def _readrecordsv1(self): |
|
292 | 293 | """read on disk merge state for version 1 file |
|
293 | 294 | |
|
294 | 295 | returns list of record [(TYPE, data), ...] |
|
295 | 296 | |
|
296 | 297 | Note: the "F" data from this file are one entry short |
|
297 | 298 | (no "other file node" entry) |
|
298 | 299 | """ |
|
299 | 300 | records = [] |
|
300 | 301 | try: |
|
301 | 302 | f = self._repo.vfs(self.statepathv1) |
|
302 | 303 | for i, l in enumerate(f): |
|
303 | 304 | if i == 0: |
|
304 | 305 | records.append((RECORD_LOCAL, l[:-1])) |
|
305 | 306 | else: |
|
306 | 307 | records.append((RECORD_MERGED, l[:-1])) |
|
307 | 308 | f.close() |
|
308 | 309 | except IOError as err: |
|
309 | 310 | if err.errno != errno.ENOENT: |
|
310 | 311 | raise |
|
311 | 312 | return records |
|
312 | 313 | |
|
313 | 314 | def _readrecordsv2(self): |
|
314 | 315 | """read on disk merge state for version 2 file |
|
315 | 316 | |
|
316 | 317 | This format is a list of arbitrary records of the form: |
|
317 | 318 | |
|
318 | 319 | [type][length][content] |
|
319 | 320 | |
|
320 | 321 | `type` is a single character, `length` is a 4 byte integer, and |
|
321 | 322 | `content` is an arbitrary byte sequence of length `length`. |
|
322 | 323 | |
|
323 | 324 | Mercurial versions prior to 3.7 have a bug where if there are |
|
324 | 325 | unsupported mandatory merge records, attempting to clear out the merge |
|
325 | 326 | state with hg update --clean or similar aborts. The 't' record type |
|
326 | 327 | works around that by writing out what those versions treat as an |
|
327 | 328 | advisory record, but later versions interpret as special: the first |
|
328 | 329 | character is the 'real' record type and everything onwards is the data. |
|
329 | 330 | |
|
330 | 331 | Returns list of records [(TYPE, data), ...].""" |
|
331 | 332 | records = [] |
|
332 | 333 | try: |
|
333 | 334 | f = self._repo.vfs(self.statepathv2) |
|
334 | 335 | data = f.read() |
|
335 | 336 | off = 0 |
|
336 | 337 | end = len(data) |
|
337 | 338 | while off < end: |
|
338 | 339 | rtype = data[off:off + 1] |
|
339 | 340 | off += 1 |
|
340 | 341 | length = _unpack('>I', data[off:(off + 4)])[0] |
|
341 | 342 | off += 4 |
|
342 | 343 | record = data[off:(off + length)] |
|
343 | 344 | off += length |
|
344 | 345 | if rtype == RECORD_OVERRIDE: |
|
345 | 346 | rtype, record = record[0:1], record[1:] |
|
346 | 347 | records.append((rtype, record)) |
|
347 | 348 | f.close() |
|
348 | 349 | except IOError as err: |
|
349 | 350 | if err.errno != errno.ENOENT: |
|
350 | 351 | raise |
|
351 | 352 | return records |
|
352 | 353 | |
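
Editor's aside (illustrative sketch, not part of this changeset): the v2 file described above is plain type/length/value framing. A minimal standalone round-trip, with hypothetical helper names packrecords/unpackrecords, behaves like _readrecordsv2 minus the vfs access and the 't' override handling:

import struct

def packrecords(records):
    # records: iterable of (single-byte type, bytes data)
    out = []
    for rtype, data in records:
        assert len(rtype) == 1
        out.append(struct.pack('>sI', rtype, len(data)) + data)
    return b''.join(out)

def unpackrecords(data):
    # inverse of packrecords: walk [type][4-byte length][content] frames
    records, off = [], 0
    while off < len(data):
        rtype = data[off:off + 1]
        length = struct.unpack('>I', data[off + 1:off + 5])[0]
        records.append((rtype, data[off + 5:off + 5 + length]))
        off += 5 + length
    return records

assert unpackrecords(packrecords([(b'L', b'abc')])) == [(b'L', b'abc')]
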
|
353 | 354 | @util.propertycache |
|
354 | 355 | def mergedriver(self): |
|
355 | 356 | # protect against the following: |
|
356 | 357 | # - A configures a malicious merge driver in their hgrc, then |
|
357 | 358 | # pauses the merge |
|
358 | 359 | # - A edits their hgrc to remove references to the merge driver |
|
359 | 360 | # - A gives a copy of their entire repo, including .hg, to B |
|
360 | 361 | # - B inspects .hgrc and finds it to be clean |
|
361 | 362 | # - B then continues the merge and the malicious merge driver |
|
362 | 363 | # gets invoked |
|
363 | 364 | configmergedriver = self._repo.ui.config('experimental', 'mergedriver') |
|
364 | 365 | if (self._readmergedriver is not None |
|
365 | 366 | and self._readmergedriver != configmergedriver): |
|
366 | 367 | raise error.ConfigError( |
|
367 | 368 | _("merge driver changed since merge started"), |
|
368 | 369 | hint=_("revert merge driver change or abort merge")) |
|
369 | 370 | |
|
370 | 371 | return configmergedriver |
|
371 | 372 | |
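
Editor's aside (simplified sketch with hypothetical names, not the extension's actual API): the protection described in the comments above amounts to comparing the driver recorded in the on-disk merge state against the live configuration and refusing to continue on a mismatch:

def checkdriver(recorded, configured):
    # recorded: driver stored in the merge state (None if never set)
    # configured: driver from the current hgrc
    if recorded is not None and recorded != configured:
        raise RuntimeError('merge driver changed since merge started')
    return configured
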
|
372 | 373 | @util.propertycache |
|
373 | 374 | def localctx(self): |
|
374 | 375 | if self._local is None: |
|
375 | 376 | msg = "localctx accessed but self._local isn't set" |
|
376 | 377 | raise error.ProgrammingError(msg) |
|
377 | 378 | return self._repo[self._local] |
|
378 | 379 | |
|
379 | 380 | @util.propertycache |
|
380 | 381 | def otherctx(self): |
|
381 | 382 | if self._other is None: |
|
382 | 383 | msg = "otherctx accessed but self._other isn't set" |
|
383 | 384 | raise error.ProgrammingError(msg) |
|
384 | 385 | return self._repo[self._other] |
|
385 | 386 | |
|
386 | 387 | def active(self): |
|
387 | 388 | """Whether mergestate is active. |
|
388 | 389 | |
|
389 | 390 | Returns True if there appears to be mergestate. This is a rough proxy |
|
390 | 391 | for "is a merge in progress." |
|
391 | 392 | """ |
|
392 | 393 | # Check local variables before looking at filesystem for performance |
|
393 | 394 | # reasons. |
|
394 | 395 | return (bool(self._local) or bool(self._state) or |
|
395 | 396 | self._repo.vfs.exists(self.statepathv1) or |
|
396 | 397 | self._repo.vfs.exists(self.statepathv2)) |
|
397 | 398 | |
|
398 | 399 | def commit(self): |
|
399 | 400 | """Write current state on disk (if necessary)""" |
|
400 | 401 | if self._dirty: |
|
401 | 402 | records = self._makerecords() |
|
402 | 403 | self._writerecords(records) |
|
403 | 404 | self._dirty = False |
|
404 | 405 | |
|
405 | 406 | def _makerecords(self): |
|
406 | 407 | records = [] |
|
407 | 408 | records.append((RECORD_LOCAL, hex(self._local))) |
|
408 | 409 | records.append((RECORD_OTHER, hex(self._other))) |
|
409 | 410 | if self.mergedriver: |
|
410 | 411 | records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([ |
|
411 | 412 | self.mergedriver, self._mdstate]))) |
|
412 | 413 | # Write out state items. In all cases, the value of the state map entry |
|
413 | 414 | # is written as the contents of the record. The record type depends on |
|
414 | 415 | # the type of state that is stored, and capital-letter records are used |
|
415 | 416 | # to prevent older versions of Mercurial that do not support the feature |
|
416 | 417 | # from loading them. |
|
417 | 418 | for filename, v in self._state.iteritems(): |
|
418 | 419 | if v[0] == MERGE_RECORD_DRIVER_RESOLVED: |
|
419 | 420 | # Driver-resolved merge. These are stored in 'D' records. |
|
420 | 421 | records.append((RECORD_MERGE_DRIVER_MERGE, |
|
421 | 422 | '\0'.join([filename] + v))) |
|
422 | 423 | elif v[0] in (MERGE_RECORD_UNRESOLVED_PATH, |
|
423 | 424 | MERGE_RECORD_RESOLVED_PATH): |
|
424 | 425 | # Path conflicts. These are stored in 'P' records. The current |
|
425 | 426 | # resolution state ('pu' or 'pr') is stored within the record. |
|
426 | 427 | records.append((RECORD_PATH_CONFLICT, |
|
427 | 428 | '\0'.join([filename] + v))) |
|
428 | 429 | elif v[1] == nullhex or v[6] == nullhex: |
|
429 | 430 | # Change/Delete or Delete/Change conflicts. These are stored in |
|
430 | 431 | # 'C' records. v[1] is the local file, and is nullhex when the |
|
431 | 432 | # file is deleted locally ('dc'). v[6] is the remote file, and |
|
432 | 433 | # is nullhex when the file is deleted remotely ('cd'). |
|
433 | 434 | records.append((RECORD_CHANGEDELETE_CONFLICT, |
|
434 | 435 | '\0'.join([filename] + v))) |
|
435 | 436 | else: |
|
436 | 437 | # Normal files. These are stored in 'F' records. |
|
437 | 438 | records.append((RECORD_MERGED, |
|
438 | 439 | '\0'.join([filename] + v))) |
|
439 | 440 | for filename, extras in sorted(self._stateextras.iteritems()): |
|
440 | 441 | rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in |
|
441 | 442 | extras.iteritems()) |
|
442 | 443 | records.append((RECORD_FILE_VALUES, |
|
443 | 444 | '%s\0%s' % (filename, rawextras))) |
|
444 | 445 | if self._labels is not None: |
|
445 | 446 | labels = '\0'.join(self._labels) |
|
446 | 447 | records.append((RECORD_LABELS, labels)) |
|
447 | 448 | return records |
|
448 | 449 | |
|
449 | 450 | def _writerecords(self, records): |
|
450 | 451 | """Write current state on disk (both v1 and v2)""" |
|
451 | 452 | self._writerecordsv1(records) |
|
452 | 453 | self._writerecordsv2(records) |
|
453 | 454 | |
|
454 | 455 | def _writerecordsv1(self, records): |
|
455 | 456 | """Write current state on disk in a version 1 file""" |
|
456 | 457 | f = self._repo.vfs(self.statepathv1, 'wb') |
|
457 | 458 | irecords = iter(records) |
|
458 | 459 | lrecords = next(irecords) |
|
459 | 460 | assert lrecords[0] == RECORD_LOCAL |
|
460 | 461 | f.write(hex(self._local) + '\n') |
|
461 | 462 | for rtype, data in irecords: |
|
462 | 463 | if rtype == RECORD_MERGED: |
|
463 | 464 | f.write('%s\n' % _droponode(data)) |
|
464 | 465 | f.close() |
|
465 | 466 | |
|
466 | 467 | def _writerecordsv2(self, records): |
|
467 | 468 | """Write current state on disk in a version 2 file |
|
468 | 469 | |
|
469 | 470 | See the docstring for _readrecordsv2 for why we use 't'.""" |
|
470 | 471 | # these are the records that all version 2 clients can read |
|
471 | 472 | allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED) |
|
472 | 473 | f = self._repo.vfs(self.statepathv2, 'wb') |
|
473 | 474 | for key, data in records: |
|
474 | 475 | assert len(key) == 1 |
|
475 | 476 | if key not in allowlist: |
|
476 | 477 | key, data = RECORD_OVERRIDE, '%s%s' % (key, data) |
|
477 | 478 | format = '>sI%is' % len(data) |
|
478 | 479 | f.write(_pack(format, key, len(data), data)) |
|
479 | 480 | f.close() |
|
480 | 481 | |
|
481 | 482 | @staticmethod |
|
482 | 483 | def getlocalkey(path): |
|
483 | 484 | """hash the path of a local file context for storage in the .hg/merge |
|
484 | 485 | directory.""" |
|
485 | 486 | |
|
486 | 487 | return hex(hashlib.sha1(path).digest()) |
|
487 | 488 | |
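
Editor's aside (sketch): getlocalkey above is just the hex SHA-1 of the path, so the stashed pre-merge copy of a file lives at .hg/merge/<hex sha1 of its path>. For example, with a hypothetical path:

import hashlib

localkey = hashlib.sha1(b'foo/bar.txt').hexdigest()
# the stashed copy would live at '.hg/merge/' + localkey (40 hex digits)
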
|
488 | 489 | def add(self, fcl, fco, fca, fd): |
|
489 | 490 | """add a new (potentially?) conflicting file the merge state |
|
490 | 491 | fcl: file context for local, |
|
491 | 492 | fco: file context for remote, |
|
492 | 493 | fca: file context for ancestors, |
|
493 | 494 | fd: file path of the resulting merge. |
|
494 | 495 | |
|
495 | 496 | note: also write the local version to the `.hg/merge` directory. |
|
496 | 497 | """ |
|
497 | 498 | if fcl.isabsent(): |
|
498 | 499 | localkey = nullhex |
|
499 | 500 | else: |
|
500 | 501 | localkey = mergestate.getlocalkey(fcl.path()) |
|
501 | 502 | self._repo.vfs.write('merge/' + localkey, fcl.data()) |
|
502 | 503 | self._state[fd] = [MERGE_RECORD_UNRESOLVED, localkey, fcl.path(), |
|
503 | 504 | fca.path(), hex(fca.filenode()), |
|
504 | 505 | fco.path(), hex(fco.filenode()), |
|
505 | 506 | fcl.flags()] |
|
506 | 507 | self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())} |
|
507 | 508 | self._dirty = True |
|
508 | 509 | |
|
509 | 510 | def addpath(self, path, frename, forigin): |
|
510 | 511 | """add a new conflicting path to the merge state |
|
511 | 512 | path: the path that conflicts |
|
512 | 513 | frename: the filename the conflicting file was renamed to |
|
513 | 514 | forigin: origin of the file ('l' or 'r' for local/remote) |
|
514 | 515 | """ |
|
515 | 516 | self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin] |
|
516 | 517 | self._dirty = True |
|
517 | 518 | |
|
518 | 519 | def __contains__(self, dfile): |
|
519 | 520 | return dfile in self._state |
|
520 | 521 | |
|
521 | 522 | def __getitem__(self, dfile): |
|
522 | 523 | return self._state[dfile][0] |
|
523 | 524 | |
|
524 | 525 | def __iter__(self): |
|
525 | 526 | return iter(sorted(self._state)) |
|
526 | 527 | |
|
527 | 528 | def files(self): |
|
528 | 529 | return self._state.keys() |
|
529 | 530 | |
|
530 | 531 | def mark(self, dfile, state): |
|
531 | 532 | self._state[dfile][0] = state |
|
532 | 533 | self._dirty = True |
|
533 | 534 | |
|
534 | 535 | def mdstate(self): |
|
535 | 536 | return self._mdstate |
|
536 | 537 | |
|
537 | 538 | def unresolved(self): |
|
538 | 539 | """Obtain the paths of unresolved files.""" |
|
539 | 540 | |
|
540 | 541 | for f, entry in self._state.iteritems(): |
|
541 | 542 | if entry[0] in (MERGE_RECORD_UNRESOLVED, |
|
542 | 543 | MERGE_RECORD_UNRESOLVED_PATH): |
|
543 | 544 | yield f |
|
544 | 545 | |
|
545 | 546 | def driverresolved(self): |
|
546 | 547 | """Obtain the paths of driver-resolved files.""" |
|
547 | 548 | |
|
548 | 549 | for f, entry in self._state.items(): |
|
549 | 550 | if entry[0] == MERGE_RECORD_DRIVER_RESOLVED: |
|
550 | 551 | yield f |
|
551 | 552 | |
|
552 | 553 | def extras(self, filename): |
|
553 | 554 | return self._stateextras.setdefault(filename, {}) |
|
554 | 555 | |
|
555 | 556 | def _resolve(self, preresolve, dfile, wctx): |
|
556 | 557 | """rerun merge process for file path `dfile`""" |
|
557 | 558 | if self[dfile] in (MERGE_RECORD_RESOLVED, |
|
558 | 559 | MERGE_RECORD_DRIVER_RESOLVED): |
|
559 | 560 | return True, 0 |
|
560 | 561 | stateentry = self._state[dfile] |
|
561 | 562 | state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry |
|
562 | 563 | octx = self._repo[self._other] |
|
563 | 564 | extras = self.extras(dfile) |
|
564 | 565 | anccommitnode = extras.get('ancestorlinknode') |
|
565 | 566 | if anccommitnode: |
|
566 | 567 | actx = self._repo[anccommitnode] |
|
567 | 568 | else: |
|
568 | 569 | actx = None |
|
569 | 570 | fcd = self._filectxorabsent(localkey, wctx, dfile) |
|
570 | 571 | fco = self._filectxorabsent(onode, octx, ofile) |
|
571 | 572 | # TODO: move this to filectxorabsent |
|
572 | 573 | fca = self._repo.filectx(afile, fileid=anode, changectx=actx) |
|
573 | 574 | # "premerge" x flags |
|
574 | 575 | flo = fco.flags() |
|
575 | 576 | fla = fca.flags() |
|
576 | 577 | if 'x' in flags + flo + fla and 'l' not in flags + flo + fla: |
|
577 | 578 | if fca.node() == nullid and flags != flo: |
|
578 | 579 | if preresolve: |
|
579 | 580 | self._repo.ui.warn( |
|
580 | 581 | _('warning: cannot merge flags for %s ' |
|
581 | 582 | 'without common ancestor - keeping local flags\n') |
|
582 | 583 | % afile) |
|
583 | 584 | elif flags == fla: |
|
584 | 585 | flags = flo |
|
585 | 586 | if preresolve: |
|
586 | 587 | # restore local |
|
587 | 588 | if localkey != nullhex: |
|
588 | 589 | f = self._repo.vfs('merge/' + localkey) |
|
589 | 590 | wctx[dfile].write(f.read(), flags) |
|
590 | 591 | f.close() |
|
591 | 592 | else: |
|
592 | 593 | wctx[dfile].remove(ignoremissing=True) |
|
593 | 594 | complete, r, deleted = filemerge.premerge(self._repo, wctx, |
|
594 | 595 | self._local, lfile, fcd, |
|
595 | 596 | fco, fca, |
|
596 | 597 | labels=self._labels) |
|
597 | 598 | else: |
|
598 | 599 | complete, r, deleted = filemerge.filemerge(self._repo, wctx, |
|
599 | 600 | self._local, lfile, fcd, |
|
600 | 601 | fco, fca, |
|
601 | 602 | labels=self._labels) |
|
602 | 603 | if r is None: |
|
603 | 604 | # no real conflict |
|
604 | 605 | del self._state[dfile] |
|
605 | 606 | self._stateextras.pop(dfile, None) |
|
606 | 607 | self._dirty = True |
|
607 | 608 | elif not r: |
|
608 | 609 | self.mark(dfile, MERGE_RECORD_RESOLVED) |
|
609 | 610 | |
|
610 | 611 | if complete: |
|
611 | 612 | action = None |
|
612 | 613 | if deleted: |
|
613 | 614 | if fcd.isabsent(): |
|
614 | 615 | # dc: local picked. Need to drop if present, which may |
|
615 | 616 | # happen on re-resolves. |
|
616 | 617 | action = ACTION_FORGET |
|
617 | 618 | else: |
|
618 | 619 | # cd: remote picked (or otherwise deleted) |
|
619 | 620 | action = ACTION_REMOVE |
|
620 | 621 | else: |
|
621 | 622 | if fcd.isabsent(): # dc: remote picked |
|
622 | 623 | action = ACTION_GET |
|
623 | 624 | elif fco.isabsent(): # cd: local picked |
|
624 | 625 | if dfile in self.localctx: |
|
625 | 626 | action = ACTION_ADD_MODIFIED |
|
626 | 627 | else: |
|
627 | 628 | action = ACTION_ADD |
|
628 | 629 | # else: regular merges (no action necessary) |
|
629 | 630 | self._results[dfile] = r, action |
|
630 | 631 | |
|
631 | 632 | return complete, r |
|
632 | 633 | |
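
Editor's aside (sketch): stripped of symlink handling and the no-common-ancestor warning, the exec-flag reconciliation in _resolve above reduces to a classic three-way rule -- take whichever side changed relative to the ancestor, keeping local on a double change:

def mergeflags(local, other, ancestor):
    if local == ancestor:
        return other    # only the other side changed the flags
    return local        # local changed (or both changed): keep local flags
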
|
633 | 634 | def _filectxorabsent(self, hexnode, ctx, f): |
|
634 | 635 | if hexnode == nullhex: |
|
635 | 636 | return filemerge.absentfilectx(ctx, f) |
|
636 | 637 | else: |
|
637 | 638 | return ctx[f] |
|
638 | 639 | |
|
639 | 640 | def preresolve(self, dfile, wctx): |
|
640 | 641 | """run premerge process for dfile |
|
641 | 642 | |
|
642 | 643 | Returns whether the merge is complete, and the exit code.""" |
|
643 | 644 | return self._resolve(True, dfile, wctx) |
|
644 | 645 | |
|
645 | 646 | def resolve(self, dfile, wctx): |
|
646 | 647 | """run merge process (assuming premerge was run) for dfile |
|
647 | 648 | |
|
648 | 649 | Returns the exit code of the merge.""" |
|
649 | 650 | return self._resolve(False, dfile, wctx)[1] |
|
650 | 651 | |
|
651 | 652 | def counts(self): |
|
652 | 653 | """return counts for updated, merged and removed files in this |
|
653 | 654 | session""" |
|
654 | 655 | updated, merged, removed = 0, 0, 0 |
|
655 | 656 | for r, action in self._results.itervalues(): |
|
656 | 657 | if r is None: |
|
657 | 658 | updated += 1 |
|
658 | 659 | elif r == 0: |
|
659 | 660 | if action == ACTION_REMOVE: |
|
660 | 661 | removed += 1 |
|
661 | 662 | else: |
|
662 | 663 | merged += 1 |
|
663 | 664 | return updated, merged, removed |
|
664 | 665 | |
|
665 | 666 | def unresolvedcount(self): |
|
666 | 667 | """get unresolved count for this merge (persistent)""" |
|
667 | 668 | return len(list(self.unresolved())) |
|
668 | 669 | |
|
669 | 670 | def actions(self): |
|
670 | 671 | """return lists of actions to perform on the dirstate""" |
|
671 | 672 | actions = { |
|
672 | 673 | ACTION_REMOVE: [], |
|
673 | 674 | ACTION_FORGET: [], |
|
674 | 675 | ACTION_ADD: [], |
|
675 | 676 | ACTION_ADD_MODIFIED: [], |
|
676 | 677 | ACTION_GET: [], |
|
677 | 678 | } |
|
678 | 679 | for f, (r, action) in self._results.iteritems(): |
|
679 | 680 | if action is not None: |
|
680 | 681 | actions[action].append((f, None, "merge result")) |
|
681 | 682 | return actions |
|
682 | 683 | |
|
683 | 684 | def recordactions(self): |
|
684 | 685 | """record remove/add/get actions in the dirstate""" |
|
685 | 686 | branchmerge = self._repo.dirstate.p2() != nullid |
|
686 | recordupdates(self._repo, self.actions(), branchmerge) | |
|
687 | recordupdates(self._repo, self.actions(), branchmerge, None) | |
|
687 | 688 | |
|
688 | 689 | def queueremove(self, f): |
|
689 | 690 | """queues a file to be removed from the dirstate |
|
690 | 691 | |
|
691 | 692 | Meant for use by custom merge drivers.""" |
|
692 | 693 | self._results[f] = 0, ACTION_REMOVE |
|
693 | 694 | |
|
694 | 695 | def queueadd(self, f): |
|
695 | 696 | """queues a file to be added to the dirstate |
|
696 | 697 | |
|
697 | 698 | Meant for use by custom merge drivers.""" |
|
698 | 699 | self._results[f] = 0, ACTION_ADD |
|
699 | 700 | |
|
700 | 701 | def queueget(self, f): |
|
701 | 702 | """queues a file to be marked modified in the dirstate |
|
702 | 703 | |
|
703 | 704 | Meant for use by custom merge drivers.""" |
|
704 | 705 | self._results[f] = 0, ACTION_GET |
|
705 | 706 | |
|
706 | 707 | def _getcheckunknownconfig(repo, section, name): |
|
707 | 708 | config = repo.ui.config(section, name) |
|
708 | 709 | valid = ['abort', 'ignore', 'warn'] |
|
709 | 710 | if config not in valid: |
|
710 | 711 | validstr = ', '.join(["'" + v + "'" for v in valid]) |
|
711 | 712 | raise error.ConfigError(_("%s.%s not valid " |
|
712 | 713 | "('%s' is none of %s)") |
|
713 | 714 | % (section, name, config, validstr)) |
|
714 | 715 | return config |
|
715 | 716 | |
|
716 | 717 | def _checkunknownfile(repo, wctx, mctx, f, f2=None): |
|
717 | 718 | if wctx.isinmemory(): |
|
718 | 719 | # Nothing to do in IMM because nothing in the "working copy" can be an |
|
719 | 720 | # unknown file. |
|
720 | 721 | # |
|
721 | 722 | # Note that we should bail out here, not in ``_checkunknownfiles()``, |
|
722 | 723 | # because that function does other useful work. |
|
723 | 724 | return False |
|
724 | 725 | |
|
725 | 726 | if f2 is None: |
|
726 | 727 | f2 = f |
|
727 | 728 | return (repo.wvfs.audit.check(f) |
|
728 | 729 | and repo.wvfs.isfileorlink(f) |
|
729 | 730 | and repo.dirstate.normalize(f) not in repo.dirstate |
|
730 | 731 | and mctx[f2].cmp(wctx[f])) |
|
731 | 732 | |
|
732 | 733 | class _unknowndirschecker(object): |
|
733 | 734 | """ |
|
734 | 735 | Look for any unknown files or directories that may have a path conflict |
|
735 | 736 | with a file. If any path prefix of the file exists as a file or link, |
|
736 | 737 | then it conflicts. If the file itself is a directory that contains any |
|
737 | 738 | file that is not tracked, then it conflicts. |
|
738 | 739 | |
|
739 | 740 | Returns the shortest path at which a conflict occurs, or None if there is |
|
740 | 741 | no conflict. |
|
741 | 742 | """ |
|
742 | 743 | def __init__(self): |
|
743 | 744 | # A set of paths known to be good. This prevents repeated checking of |
|
744 | 745 | # dirs. It will be updated with any new dirs that are checked and found |
|
745 | 746 | # to be safe. |
|
746 | 747 | self._unknowndircache = set() |
|
747 | 748 | |
|
748 | 749 | # A set of paths that are known to be absent. This prevents repeated |
|
749 | 750 | # checking of subdirectories that are known not to exist. It will be |
|
750 | 751 | # updated with any new dirs that are checked and found to be absent. |
|
751 | 752 | self._missingdircache = set() |
|
752 | 753 | |
|
753 | 754 | def __call__(self, repo, wctx, f): |
|
754 | 755 | if wctx.isinmemory(): |
|
755 | 756 | # Nothing to do in IMM for the same reason as ``_checkunknownfile``. |
|
756 | 757 | return False |
|
757 | 758 | |
|
758 | 759 | # Check for path prefixes that exist as unknown files. |
|
759 | 760 | for p in reversed(list(util.finddirs(f))): |
|
760 | 761 | if p in self._missingdircache: |
|
761 | 762 | return |
|
762 | 763 | if p in self._unknowndircache: |
|
763 | 764 | continue |
|
764 | 765 | if repo.wvfs.audit.check(p): |
|
765 | 766 | if (repo.wvfs.isfileorlink(p) |
|
766 | 767 | and repo.dirstate.normalize(p) not in repo.dirstate): |
|
767 | 768 | return p |
|
768 | 769 | if not repo.wvfs.lexists(p): |
|
769 | 770 | self._missingdircache.add(p) |
|
770 | 771 | return |
|
771 | 772 | self._unknowndircache.add(p) |
|
772 | 773 | |
|
773 | 774 | # Check if the file conflicts with a directory containing unknown files. |
|
774 | 775 | if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f): |
|
775 | 776 | # Does the directory contain any files that are not in the dirstate? |
|
776 | 777 | for p, dirs, files in repo.wvfs.walk(f): |
|
777 | 778 | for fn in files: |
|
778 | 779 | relf = util.pconvert(repo.wvfs.reljoin(p, fn)) |
|
779 | 780 | relf = repo.dirstate.normalize(relf, isknown=True) |
|
780 | 781 | if relf not in repo.dirstate: |
|
781 | 782 | return f |
|
782 | 783 | return None |
|
783 | 784 | |
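
Editor's aside: util.finddirs, used by the checker above, yields each parent directory of a path from deepest to shallowest. An approximate pure-Python equivalent (assumption: no trailing root entry is yielded):

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

assert list(finddirs('a/b/c.txt')) == ['a/b', 'a']
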
|
784 | 785 | def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce): |
|
785 | 786 | """ |
|
786 | 787 | Considers any actions that care about the presence of conflicting unknown |
|
787 | 788 | files. For some actions, the result is to abort; for others, it is to |
|
788 | 789 | choose a different action. |
|
789 | 790 | """ |
|
790 | 791 | fileconflicts = set() |
|
791 | 792 | pathconflicts = set() |
|
792 | 793 | warnconflicts = set() |
|
793 | 794 | abortconflicts = set() |
|
794 | 795 | unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown') |
|
795 | 796 | ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored') |
|
796 | 797 | pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts') |
|
797 | 798 | if not force: |
|
798 | 799 | def collectconflicts(conflicts, config): |
|
799 | 800 | if config == 'abort': |
|
800 | 801 | abortconflicts.update(conflicts) |
|
801 | 802 | elif config == 'warn': |
|
802 | 803 | warnconflicts.update(conflicts) |
|
803 | 804 | |
|
804 | 805 | checkunknowndirs = _unknowndirschecker() |
|
805 | 806 | for f, (m, args, msg) in actions.iteritems(): |
|
806 | 807 | if m in (ACTION_CREATED, ACTION_DELETED_CHANGED): |
|
807 | 808 | if _checkunknownfile(repo, wctx, mctx, f): |
|
808 | 809 | fileconflicts.add(f) |
|
809 | 810 | elif pathconfig and f not in wctx: |
|
810 | 811 | path = checkunknowndirs(repo, wctx, f) |
|
811 | 812 | if path is not None: |
|
812 | 813 | pathconflicts.add(path) |
|
813 | 814 | elif m == ACTION_LOCAL_DIR_RENAME_GET: |
|
814 | 815 | if _checkunknownfile(repo, wctx, mctx, f, args[0]): |
|
815 | 816 | fileconflicts.add(f) |
|
816 | 817 | |
|
817 | 818 | allconflicts = fileconflicts | pathconflicts |
|
818 | 819 | ignoredconflicts = {c for c in allconflicts |
|
819 | 820 | if repo.dirstate._ignore(c)} |
|
820 | 821 | unknownconflicts = allconflicts - ignoredconflicts |
|
821 | 822 | collectconflicts(ignoredconflicts, ignoredconfig) |
|
822 | 823 | collectconflicts(unknownconflicts, unknownconfig) |
|
823 | 824 | else: |
|
824 | 825 | for f, (m, args, msg) in actions.iteritems(): |
|
825 | 826 | if m == ACTION_CREATED_MERGE: |
|
826 | 827 | fl2, anc = args |
|
827 | 828 | different = _checkunknownfile(repo, wctx, mctx, f) |
|
828 | 829 | if repo.dirstate._ignore(f): |
|
829 | 830 | config = ignoredconfig |
|
830 | 831 | else: |
|
831 | 832 | config = unknownconfig |
|
832 | 833 | |
|
833 | 834 | # The behavior when force is True is described by this table: |
|
834 | 835 | # config different mergeforce | action backup |
|
835 | 836 | # * n * | get n |
|
836 | 837 | # * y y | merge - |
|
837 | 838 | # abort y n | merge - (1) |
|
838 | 839 | # warn y n | warn + get y |
|
839 | 840 | # ignore y n | get y |
|
840 | 841 | # |
|
841 | 842 | # (1) this is probably the wrong behavior here -- we should |
|
842 | 843 | # probably abort, but some actions like rebases currently |
|
843 | 844 | # don't like an abort happening in the middle of |
|
844 | 845 | # merge.update. |
|
845 | 846 | if not different: |
|
846 | 847 | actions[f] = (ACTION_GET, (fl2, False), 'remote created') |
|
847 | 848 | elif mergeforce or config == 'abort': |
|
848 | 849 | actions[f] = (ACTION_MERGE, (f, f, None, False, anc), |
|
849 | 850 | 'remote differs from untracked local') |
|
850 | 851 | elif config == 'abort': |
|
851 | 852 | abortconflicts.add(f) |
|
852 | 853 | else: |
|
853 | 854 | if config == 'warn': |
|
854 | 855 | warnconflicts.add(f) |
|
855 | 856 | actions[f] = (ACTION_GET, (fl2, True), 'remote created') |
|
856 | 857 | |
|
857 | 858 | for f in sorted(abortconflicts): |
|
858 | 859 | warn = repo.ui.warn |
|
859 | 860 | if f in pathconflicts: |
|
860 | 861 | if repo.wvfs.isfileorlink(f): |
|
861 | 862 | warn(_("%s: untracked file conflicts with directory\n") % f) |
|
862 | 863 | else: |
|
863 | 864 | warn(_("%s: untracked directory conflicts with file\n") % f) |
|
864 | 865 | else: |
|
865 | 866 | warn(_("%s: untracked file differs\n") % f) |
|
866 | 867 | if abortconflicts: |
|
867 | 868 | raise error.Abort(_("untracked files in working directory " |
|
868 | 869 | "differ from files in requested revision")) |
|
869 | 870 | |
|
870 | 871 | for f in sorted(warnconflicts): |
|
871 | 872 | if repo.wvfs.isfileorlink(f): |
|
872 | 873 | repo.ui.warn(_("%s: replacing untracked file\n") % f) |
|
873 | 874 | else: |
|
874 | 875 | repo.ui.warn(_("%s: replacing untracked files in directory\n") % f) |
|
875 | 876 | |
|
876 | 877 | for f, (m, args, msg) in actions.iteritems(): |
|
877 | 878 | if m == ACTION_CREATED: |
|
878 | 879 | backup = (f in fileconflicts or f in pathconflicts or |
|
879 | 880 | any(p in pathconflicts for p in util.finddirs(f))) |
|
880 | 881 | flags, = args |
|
881 | 882 | actions[f] = (ACTION_GET, (flags, backup), msg) |
|
882 | 883 | |
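
Editor's aside (hypothetical helper, not in the diff): the force=True behavior table above, read as code -- including the questionable abort-means-merge branch flagged as (1):

def untrackedfileaction(config, different, mergeforce):
    # returns (action, backup) per the table in _checkunknownfiles
    if not different:
        return 'get', False
    if mergeforce or config == 'abort':
        return 'merge', None            # (1) abort currently merges instead
    if config == 'warn':
        return 'warn+get', True
    return 'get', True                  # config == 'ignore'
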
|
883 | 884 | def _forgetremoved(wctx, mctx, branchmerge): |
|
884 | 885 | """ |
|
885 | 886 | Forget removed files |
|
886 | 887 | |
|
887 | 888 | If we're jumping between revisions (as opposed to merging), and if |
|
888 | 889 | neither the working directory nor the target rev has the file, |
|
889 | 890 | then we need to remove it from the dirstate, to prevent the |
|
890 | 891 | dirstate from listing the file when it is no longer in the |
|
891 | 892 | manifest. |
|
892 | 893 | |
|
893 | 894 | If we're merging, and the other revision has removed a file |
|
894 | 895 | that is not present in the working directory, we need to mark it |
|
895 | 896 | as removed. |
|
896 | 897 | """ |
|
897 | 898 | |
|
898 | 899 | actions = {} |
|
899 | 900 | m = ACTION_FORGET |
|
900 | 901 | if branchmerge: |
|
901 | 902 | m = ACTION_REMOVE |
|
902 | 903 | for f in wctx.deleted(): |
|
903 | 904 | if f not in mctx: |
|
904 | 905 | actions[f] = m, None, "forget deleted" |
|
905 | 906 | |
|
906 | 907 | if not branchmerge: |
|
907 | 908 | for f in wctx.removed(): |
|
908 | 909 | if f not in mctx: |
|
909 | 910 | actions[f] = ACTION_FORGET, None, "forget removed" |
|
910 | 911 | |
|
911 | 912 | return actions |
|
912 | 913 | |
|
913 | 914 | def _checkcollision(repo, wmf, actions): |
|
914 | 915 | """ |
|
915 | 916 | Check for case-folding collisions. |
|
916 | 917 | """ |
|
917 | 918 | |
|
918 | 919 | # If the repo is narrowed, filter out files outside the narrowspec. |
|
919 | 920 | narrowmatch = repo.narrowmatch() |
|
920 | 921 | if not narrowmatch.always(): |
|
921 | 922 | wmf = wmf.matches(narrowmatch) |
|
922 | 923 | if actions: |
|
923 | 924 | narrowactions = {} |
|
924 | 925 | for m, actionsfortype in actions.iteritems(): |
|
925 | 926 | narrowactions[m] = [] |
|
926 | 927 | for (f, args, msg) in actionsfortype: |
|
927 | 928 | if narrowmatch(f): |
|
928 | 929 | narrowactions[m].append((f, args, msg)) |
|
929 | 930 | actions = narrowactions |
|
930 | 931 | |
|
931 | 932 | # build provisional merged manifest up |
|
932 | 933 | pmmf = set(wmf) |
|
933 | 934 | |
|
934 | 935 | if actions: |
|
935 | 936 | # KEEP and EXEC are no-op |
|
936 | 937 | for m in (ACTION_ADD, ACTION_ADD_MODIFIED, ACTION_FORGET, ACTION_GET, |
|
937 | 938 | ACTION_CHANGED_DELETED, ACTION_DELETED_CHANGED): |
|
938 | 939 | for f, args, msg in actions[m]: |
|
939 | 940 | pmmf.add(f) |
|
940 | 941 | for f, args, msg in actions[ACTION_REMOVE]: |
|
941 | 942 | pmmf.discard(f) |
|
942 | 943 | for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]: |
|
943 | 944 | f2, flags = args |
|
944 | 945 | pmmf.discard(f2) |
|
945 | 946 | pmmf.add(f) |
|
946 | 947 | for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]: |
|
947 | 948 | pmmf.add(f) |
|
948 | 949 | for f, args, msg in actions[ACTION_MERGE]: |
|
949 | 950 | f1, f2, fa, move, anc = args |
|
950 | 951 | if move: |
|
951 | 952 | pmmf.discard(f1) |
|
952 | 953 | pmmf.add(f) |
|
953 | 954 | |
|
954 | 955 | # check case-folding collision in provisional merged manifest |
|
955 | 956 | foldmap = {} |
|
956 | 957 | for f in pmmf: |
|
957 | 958 | fold = util.normcase(f) |
|
958 | 959 | if fold in foldmap: |
|
959 | 960 | raise error.Abort(_("case-folding collision between %s and %s") |
|
960 | 961 | % (f, foldmap[fold])) |
|
961 | 962 | foldmap[fold] = f |
|
962 | 963 | |
|
963 | 964 | # check case-folding of directories |
|
964 | 965 | foldprefix = unfoldprefix = lastfull = '' |
|
965 | 966 | for fold, f in sorted(foldmap.items()): |
|
966 | 967 | if fold.startswith(foldprefix) and not f.startswith(unfoldprefix): |
|
967 | 968 | # the folded prefix matches but actual casing is different |
|
968 | 969 | raise error.Abort(_("case-folding collision between " |
|
969 | 970 | "%s and directory of %s") % (lastfull, f)) |
|
970 | 971 | foldprefix = fold + '/' |
|
971 | 972 | unfoldprefix = f + '/' |
|
972 | 973 | lastfull = f |
|
973 | 974 | |
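
Editor's aside (sketch): the collision scan above groups the provisional merged manifest by a case-folded key; with util.normcase approximated by str.lower, the file-level check is essentially:

def findcollisions(paths):
    foldmap = {}
    for f in sorted(paths):
        fold = f.lower()
        if fold in foldmap:
            yield foldmap[fold], f      # a case-folding collision
        foldmap.setdefault(fold, f)

assert list(findcollisions({'README', 'readme'})) == [('README', 'readme')]
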
|
974 | 975 | def driverpreprocess(repo, ms, wctx, labels=None): |
|
975 | 976 | """run the preprocess step of the merge driver, if any |
|
976 | 977 | |
|
977 | 978 | This is currently not implemented -- it's an extension point.""" |
|
978 | 979 | return True |
|
979 | 980 | |
|
980 | 981 | def driverconclude(repo, ms, wctx, labels=None): |
|
981 | 982 | """run the conclude step of the merge driver, if any |
|
982 | 983 | |
|
983 | 984 | This is currently not implemented -- it's an extension point.""" |
|
984 | 985 | return True |
|
985 | 986 | |
|
986 | 987 | def _filesindirs(repo, manifest, dirs): |
|
987 | 988 | """ |
|
988 | 989 | Generator that yields pairs of all the files in the manifest that are found |
|
989 | 990 | inside the directories listed in dirs, and which directory they are found |
|
990 | 991 | in. |
|
991 | 992 | """ |
|
992 | 993 | for f in manifest: |
|
993 | 994 | for p in util.finddirs(f): |
|
994 | 995 | if p in dirs: |
|
995 | 996 | yield f, p |
|
996 | 997 | break |
|
997 | 998 | |
|
998 | 999 | def checkpathconflicts(repo, wctx, mctx, actions): |
|
999 | 1000 | """ |
|
1000 | 1001 | Check if any actions introduce path conflicts in the repository, updating |
|
1001 | 1002 | actions to record or handle the path conflict accordingly. |
|
1002 | 1003 | """ |
|
1003 | 1004 | mf = wctx.manifest() |
|
1004 | 1005 | |
|
1005 | 1006 | # The set of local files that conflict with a remote directory. |
|
1006 | 1007 | localconflicts = set() |
|
1007 | 1008 | |
|
1008 | 1009 | # The set of directories that conflict with a remote file, and so may cause |
|
1009 | 1010 | # conflicts if they still contain any files after the merge. |
|
1010 | 1011 | remoteconflicts = set() |
|
1011 | 1012 | |
|
1012 | 1013 | # The set of directories that appear as both a file and a directory in the |
|
1013 | 1014 | # remote manifest. These indicate an invalid remote manifest, which |
|
1014 | 1015 | # can't be cleanly updated to.
|
1015 | 1016 | invalidconflicts = set() |
|
1016 | 1017 | |
|
1017 | 1018 | # The set of directories that contain files that are being created. |
|
1018 | 1019 | createdfiledirs = set() |
|
1019 | 1020 | |
|
1020 | 1021 | # The set of files deleted by all the actions. |
|
1021 | 1022 | deletedfiles = set() |
|
1022 | 1023 | |
|
1023 | 1024 | for f, (m, args, msg) in actions.items(): |
|
1024 | 1025 | if m in (ACTION_CREATED, ACTION_DELETED_CHANGED, ACTION_MERGE, |
|
1025 | 1026 | ACTION_CREATED_MERGE): |
|
1026 | 1027 | # This action may create a new local file. |
|
1027 | 1028 | createdfiledirs.update(util.finddirs(f)) |
|
1028 | 1029 | if mf.hasdir(f): |
|
1029 | 1030 | # The file aliases a local directory. This might be ok if all |
|
1030 | 1031 | # the files in the local directory are being deleted. This |
|
1031 | 1032 | # will be checked once we know what all the deleted files are. |
|
1032 | 1033 | remoteconflicts.add(f) |
|
1033 | 1034 | # Track the names of all deleted files. |
|
1034 | 1035 | if m == ACTION_REMOVE: |
|
1035 | 1036 | deletedfiles.add(f) |
|
1036 | 1037 | if m == ACTION_MERGE: |
|
1037 | 1038 | f1, f2, fa, move, anc = args |
|
1038 | 1039 | if move: |
|
1039 | 1040 | deletedfiles.add(f1) |
|
1040 | 1041 | if m == ACTION_DIR_RENAME_MOVE_LOCAL: |
|
1041 | 1042 | f2, flags = args |
|
1042 | 1043 | deletedfiles.add(f2) |
|
1043 | 1044 | |
|
1044 | 1045 | # Check all directories that contain created files for path conflicts. |
|
1045 | 1046 | for p in createdfiledirs: |
|
1046 | 1047 | if p in mf: |
|
1047 | 1048 | if p in mctx: |
|
1048 | 1049 | # A file is in a directory which aliases both a local |
|
1049 | 1050 | # and a remote file. This is an internal inconsistency |
|
1050 | 1051 | # within the remote manifest. |
|
1051 | 1052 | invalidconflicts.add(p) |
|
1052 | 1053 | else: |
|
1053 | 1054 | # A file is in a directory which aliases a local file. |
|
1054 | 1055 | # We will need to rename the local file. |
|
1055 | 1056 | localconflicts.add(p) |
|
1056 | 1057 | if p in actions and actions[p][0] in (ACTION_CREATED, |
|
1057 | 1058 | ACTION_DELETED_CHANGED, |
|
1058 | 1059 | ACTION_MERGE, |
|
1059 | 1060 | ACTION_CREATED_MERGE): |
|
1060 | 1061 | # The file is in a directory which aliases a remote file. |
|
1061 | 1062 | # This is an internal inconsistency within the remote |
|
1062 | 1063 | # manifest. |
|
1063 | 1064 | invalidconflicts.add(p) |
|
1064 | 1065 | |
|
1065 | 1066 | # Rename all local conflicting files that have not been deleted. |
|
1066 | 1067 | for p in localconflicts: |
|
1067 | 1068 | if p not in deletedfiles: |
|
1068 | 1069 | ctxname = bytes(wctx).rstrip('+') |
|
1069 | 1070 | pnew = util.safename(p, ctxname, wctx, set(actions.keys())) |
|
1070 | 1071 | actions[pnew] = (ACTION_PATH_CONFLICT_RESOLVE, (p,), |
|
1071 | 1072 | 'local path conflict') |
|
1072 | 1073 | actions[p] = (ACTION_PATH_CONFLICT, (pnew, 'l'), |
|
1073 | 1074 | 'path conflict') |
|
1074 | 1075 | |
|
1075 | 1076 | if remoteconflicts: |
|
1076 | 1077 | # Check if all files in the conflicting directories have been removed. |
|
1077 | 1078 | ctxname = bytes(mctx).rstrip('+') |
|
1078 | 1079 | for f, p in _filesindirs(repo, mf, remoteconflicts): |
|
1079 | 1080 | if f not in deletedfiles: |
|
1080 | 1081 | m, args, msg = actions[p] |
|
1081 | 1082 | pnew = util.safename(p, ctxname, wctx, set(actions.keys())) |
|
1082 | 1083 | if m in (ACTION_DELETED_CHANGED, ACTION_MERGE): |
|
1083 | 1084 | # Action was merge, just update target. |
|
1084 | 1085 | actions[pnew] = (m, args, msg) |
|
1085 | 1086 | else: |
|
1086 | 1087 | # Action was create, change to renamed get action. |
|
1087 | 1088 | fl = args[0] |
|
1088 | 1089 | actions[pnew] = (ACTION_LOCAL_DIR_RENAME_GET, (p, fl), |
|
1089 | 1090 | 'remote path conflict') |
|
1090 | 1091 | actions[p] = (ACTION_PATH_CONFLICT, (pnew, ACTION_REMOVE), |
|
1091 | 1092 | 'path conflict') |
|
1092 | 1093 | remoteconflicts.remove(p) |
|
1093 | 1094 | break |
|
1094 | 1095 | |
|
1095 | 1096 | if invalidconflicts: |
|
1096 | 1097 | for p in invalidconflicts: |
|
1097 | 1098 | repo.ui.warn(_("%s: is both a file and a directory\n") % p) |
|
1098 | 1099 | raise error.Abort(_("destination manifest contains path conflicts")) |
|
1099 | 1100 | |
|
1100 | 1101 | def _filternarrowactions(narrowmatch, branchmerge, actions): |
|
1101 | 1102 | """ |
|
1102 | 1103 | Filters out actions that can be ignored because the repo is narrowed.
|
1103 | 1104 | |
|
1104 | 1105 | Raise an exception if the merge cannot be completed because the repo is |
|
1105 | 1106 | narrowed. |
|
1106 | 1107 | """ |
|
1107 | 1108 | nooptypes = {'k'} # TODO: handle with nonconflicttypes |
|
1108 | 1109 | nonconflicttypes = set('a am c cm f g r e'.split()) |
|
1109 | 1110 | # We mutate the items in the dict during iteration, so iterate |
|
1110 | 1111 | # over a copy. |
|
1111 | 1112 | for f, action in list(actions.items()): |
|
1112 | 1113 | if narrowmatch(f): |
|
1113 | 1114 | pass |
|
1114 | 1115 | elif not branchmerge: |
|
1115 | 1116 | del actions[f] # just updating, ignore changes outside clone |
|
1116 | 1117 | elif action[0] in nooptypes: |
|
1117 | 1118 | del actions[f] # merge does not affect file |
|
1118 | 1119 | elif action[0] in nonconflicttypes: |
|
1119 | 1120 | raise error.Abort(_('merge affects file \'%s\' outside narrow, ' |
|
1120 | 1121 | 'which is not yet supported') % f, |
|
1121 | 1122 | hint=_('merging in the other direction ' |
|
1122 | 1123 | 'may work')) |
|
1123 | 1124 | else: |
|
1124 | 1125 | raise error.Abort(_('conflict in file \'%s\' is outside ' |
|
1125 | 1126 | 'narrow clone') % f) |
|
1126 | 1127 | |
|
1127 | 1128 | def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher, |
|
1128 | 1129 | acceptremote, followcopies, forcefulldiff=False): |
|
1129 | 1130 | """ |
|
1130 | 1131 | Merge wctx and p2 with ancestor pa and generate merge action list |
|
1131 | 1132 | |
|
1132 | 1133 | branchmerge and force are as passed in to update |
|
1133 | 1134 | matcher = matcher to filter file lists |
|
1134 | 1135 | acceptremote = accept the incoming changes without prompting |
|
1135 | 1136 | """ |
|
1136 | 1137 | if matcher is not None and matcher.always(): |
|
1137 | 1138 | matcher = None |
|
1138 | 1139 | |
|
1139 | 1140 | copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {} |
|
1140 | 1141 | |
|
1141 | 1142 | # manifests fetched in order are going to be faster, so prime the caches |
|
1142 | 1143 | [x.manifest() for x in |
|
1143 | 1144 | sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)] |
|
1144 | 1145 | |
|
1145 | 1146 | if followcopies: |
|
1146 | 1147 | ret = copies.mergecopies(repo, wctx, p2, pa) |
|
1147 | 1148 | copy, movewithdir, diverge, renamedelete, dirmove = ret |
|
1148 | 1149 | |
|
1149 | 1150 | boolbm = pycompat.bytestr(bool(branchmerge)) |
|
1150 | 1151 | boolf = pycompat.bytestr(bool(force)) |
|
1151 | 1152 | boolm = pycompat.bytestr(bool(matcher)) |
|
1152 | 1153 | repo.ui.note(_("resolving manifests\n")) |
|
1153 | 1154 | repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n" |
|
1154 | 1155 | % (boolbm, boolf, boolm)) |
|
1155 | 1156 | repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2)) |
|
1156 | 1157 | |
|
1157 | 1158 | m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest() |
|
1158 | 1159 | copied = set(copy.values()) |
|
1159 | 1160 | copied.update(movewithdir.values()) |
|
1160 | 1161 | |
|
1161 | 1162 | if '.hgsubstate' in m1 and wctx.rev() is None: |
|
1162 | 1163 | # Check whether sub state is modified, and overwrite the manifest |
|
1163 | 1164 | # to flag the change. If wctx is a committed revision, we shouldn't |
|
1164 | 1165 | # care for the dirty state of the working directory. |
|
1165 | 1166 | if any(wctx.sub(s).dirty() for s in wctx.substate): |
|
1166 | 1167 | m1['.hgsubstate'] = modifiednodeid |
|
1167 | 1168 | |
|
1168 | 1169 | # Don't use m2-vs-ma optimization if: |
|
1169 | 1170 | # - ma is the same as m1 or m2, which we're just going to diff again later |
|
1170 | 1171 | # - The caller specifically asks for a full diff, which is useful during bid |
|
1171 | 1172 | # merge. |
|
1172 | 1173 | if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff): |
|
1173 | 1174 | # Identify which files are relevant to the merge, so we can limit the |
|
1174 | 1175 | # total m1-vs-m2 diff to just those files. This has significant |
|
1175 | 1176 | # performance benefits in large repositories. |
|
1176 | 1177 | relevantfiles = set(ma.diff(m2).keys()) |
|
1177 | 1178 | |
|
1178 | 1179 | # For copied and moved files, we need to add the source file too. |
|
1179 | 1180 | for copykey, copyvalue in copy.iteritems(): |
|
1180 | 1181 | if copyvalue in relevantfiles: |
|
1181 | 1182 | relevantfiles.add(copykey) |
|
1182 | 1183 | for movedirkey in movewithdir: |
|
1183 | 1184 | relevantfiles.add(movedirkey) |
|
1184 | 1185 | filesmatcher = scmutil.matchfiles(repo, relevantfiles) |
|
1185 | 1186 | matcher = matchmod.intersectmatchers(matcher, filesmatcher) |
|
1186 | 1187 | |
|
1187 | 1188 | diff = m1.diff(m2, match=matcher) |
|
1188 | 1189 | |
|
1189 | 1190 | actions = {} |
|
1190 | 1191 | for f, ((n1, fl1), (n2, fl2)) in diff.iteritems(): |
|
1191 | 1192 | if n1 and n2: # file exists on both local and remote side |
|
1192 | 1193 | if f not in ma: |
|
1193 | 1194 | fa = copy.get(f, None) |
|
1194 | 1195 | if fa is not None: |
|
1195 | 1196 | actions[f] = (ACTION_MERGE, (f, f, fa, False, pa.node()), |
|
1196 | 1197 | 'both renamed from %s' % fa) |
|
1197 | 1198 | else: |
|
1198 | 1199 | actions[f] = (ACTION_MERGE, (f, f, None, False, pa.node()), |
|
1199 | 1200 | 'both created') |
|
1200 | 1201 | else: |
|
1201 | 1202 | a = ma[f] |
|
1202 | 1203 | fla = ma.flags(f) |
|
1203 | 1204 | nol = 'l' not in fl1 + fl2 + fla |
|
1204 | 1205 | if n2 == a and fl2 == fla: |
|
1205 | 1206 | actions[f] = (ACTION_KEEP, (), 'remote unchanged') |
|
1206 | 1207 | elif n1 == a and fl1 == fla: # local unchanged - use remote |
|
1207 | 1208 | if n1 == n2: # optimization: keep local content |
|
1208 | 1209 | actions[f] = (ACTION_EXEC, (fl2,), 'update permissions') |
|
1209 | 1210 | else: |
|
1210 | 1211 | actions[f] = (ACTION_GET, (fl2, False), |
|
1211 | 1212 | 'remote is newer') |
|
1212 | 1213 | elif nol and n2 == a: # remote only changed 'x' |
|
1213 | 1214 | actions[f] = (ACTION_EXEC, (fl2,), 'update permissions') |
|
1214 | 1215 | elif nol and n1 == a: # local only changed 'x' |
|
1215 | 1216 | actions[f] = (ACTION_GET, (fl1, False), 'remote is newer') |
|
1216 | 1217 | else: # both changed something |
|
1217 | 1218 | actions[f] = (ACTION_MERGE, (f, f, f, False, pa.node()), |
|
1218 | 1219 | 'versions differ') |
|
1219 | 1220 | elif n1: # file exists only on local side |
|
1220 | 1221 | if f in copied: |
|
1221 | 1222 | pass # we'll deal with it on m2 side |
|
1222 | 1223 | elif f in movewithdir: # directory rename, move local |
|
1223 | 1224 | f2 = movewithdir[f] |
|
1224 | 1225 | if f2 in m2: |
|
1225 | 1226 | actions[f2] = (ACTION_MERGE, (f, f2, None, True, pa.node()), |
|
1226 | 1227 | 'remote directory rename, both created') |
|
1227 | 1228 | else: |
|
1228 | 1229 | actions[f2] = (ACTION_DIR_RENAME_MOVE_LOCAL, (f, fl1), |
|
1229 | 1230 | 'remote directory rename - move from %s' % f) |
|
1230 | 1231 | elif f in copy: |
|
1231 | 1232 | f2 = copy[f] |
|
1232 | 1233 | actions[f] = (ACTION_MERGE, (f, f2, f2, False, pa.node()), |
|
1233 | 1234 | 'local copied/moved from %s' % f2) |
|
1234 | 1235 | elif f in ma: # clean, a different, no remote |
|
1235 | 1236 | if n1 != ma[f]: |
|
1236 | 1237 | if acceptremote: |
|
1237 | 1238 | actions[f] = (ACTION_REMOVE, None, 'remote delete') |
|
1238 | 1239 | else: |
|
1239 | 1240 | actions[f] = (ACTION_CHANGED_DELETED, |
|
1240 | 1241 | (f, None, f, False, pa.node()), |
|
1241 | 1242 | 'prompt changed/deleted') |
|
1242 | 1243 | elif n1 == addednodeid: |
|
1243 | 1244 | # This extra 'a' is added by working copy manifest to mark |
|
1244 | 1245 | # the file as locally added. We should forget it instead of |
|
1245 | 1246 | # deleting it. |
|
1246 | 1247 | actions[f] = (ACTION_FORGET, None, 'remote deleted') |
|
1247 | 1248 | else: |
|
1248 | 1249 | actions[f] = (ACTION_REMOVE, None, 'other deleted') |
|
1249 | 1250 | elif n2: # file exists only on remote side |
|
1250 | 1251 | if f in copied: |
|
1251 | 1252 | pass # we'll deal with it on m1 side |
|
1252 | 1253 | elif f in movewithdir: |
|
1253 | 1254 | f2 = movewithdir[f] |
|
1254 | 1255 | if f2 in m1: |
|
1255 | 1256 | actions[f2] = (ACTION_MERGE, |
|
1256 | 1257 | (f2, f, None, False, pa.node()), |
|
1257 | 1258 | 'local directory rename, both created') |
|
1258 | 1259 | else: |
|
1259 | 1260 | actions[f2] = (ACTION_LOCAL_DIR_RENAME_GET, (f, fl2), |
|
1260 | 1261 | 'local directory rename - get from %s' % f) |
|
1261 | 1262 | elif f in copy: |
|
1262 | 1263 | f2 = copy[f] |
|
1263 | 1264 | if f2 in m2: |
|
1264 | 1265 | actions[f] = (ACTION_MERGE, (f2, f, f2, False, pa.node()), |
|
1265 | 1266 | 'remote copied from %s' % f2) |
|
1266 | 1267 | else: |
|
1267 | 1268 | actions[f] = (ACTION_MERGE, (f2, f, f2, True, pa.node()), |
|
1268 | 1269 | 'remote moved from %s' % f2) |
|
1269 | 1270 | elif f not in ma: |
|
1270 | 1271 | # local unknown, remote created: the logic is described by the |
|
1271 | 1272 | # following table: |
|
1272 | 1273 | # |
|
1273 | 1274 | # force branchmerge different | action |
|
1274 | 1275 | # n * * | create |
|
1275 | 1276 | # y n * | create |
|
1276 | 1277 | # y y n | create |
|
1277 | 1278 | # y y y | merge |
|
1278 | 1279 | # |
|
1279 | 1280 | # Checking whether the files are different is expensive, so we |
|
1280 | 1281 | # don't do that when we can avoid it. |
|
1281 | 1282 | if not force: |
|
1282 | 1283 | actions[f] = (ACTION_CREATED, (fl2,), 'remote created') |
|
1283 | 1284 | elif not branchmerge: |
|
1284 | 1285 | actions[f] = (ACTION_CREATED, (fl2,), 'remote created') |
|
1285 | 1286 | else: |
|
1286 | 1287 | actions[f] = (ACTION_CREATED_MERGE, (fl2, pa.node()), |
|
1287 | 1288 | 'remote created, get or merge') |
|
1288 | 1289 | elif n2 != ma[f]: |
|
1289 | 1290 | df = None |
|
1290 | 1291 | for d in dirmove: |
|
1291 | 1292 | if f.startswith(d): |
|
1292 | 1293 | # new file added in a directory that was moved |
|
1293 | 1294 | df = dirmove[d] + f[len(d):] |
|
1294 | 1295 | break |
|
1295 | 1296 | if df is not None and df in m1: |
|
1296 | 1297 | actions[df] = (ACTION_MERGE, (df, f, f, False, pa.node()), |
|
1297 | 1298 | 'local directory rename - respect move ' |
|
1298 | 1299 | 'from %s' % f) |
|
1299 | 1300 | elif acceptremote: |
|
1300 | 1301 | actions[f] = (ACTION_CREATED, (fl2,), 'remote recreating') |
|
1301 | 1302 | else: |
|
1302 | 1303 | actions[f] = (ACTION_DELETED_CHANGED, |
|
1303 | 1304 | (None, f, f, False, pa.node()), |
|
1304 | 1305 | 'prompt deleted/changed') |
|
1305 | 1306 | |
|
1306 | 1307 | if repo.ui.configbool('experimental', 'merge.checkpathconflicts'): |
|
1307 | 1308 | # If we are merging, look for path conflicts. |
|
1308 | 1309 | checkpathconflicts(repo, wctx, p2, actions) |
|
1309 | 1310 | |
|
1310 | 1311 | narrowmatch = repo.narrowmatch() |
|
1311 | 1312 | if not narrowmatch.always(): |
|
1312 | 1313 | # Updates "actions" in place |
|
1313 | 1314 | _filternarrowactions(narrowmatch, branchmerge, actions) |
|
1314 | 1315 | |
|
1315 | 1316 | return actions, diverge, renamedelete |
|
1316 | 1317 | |
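
Editor's aside (sketch): ignoring flags, copies, and renames, the core decision manifestmerge makes above for a file present on both sides is the textbook three-way comparison of file nodes against the ancestor:

def threeway(n1, n2, a):
    # n1/n2: local/remote file nodes; a: ancestor file node
    if n2 == a:
        return 'keep local'    # remote unchanged
    if n1 == a:
        return 'get remote'    # local unchanged
    return 'merge'             # both sides changed it
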
|
1317 | 1318 | def _resolvetrivial(repo, wctx, mctx, ancestor, actions): |
|
1318 | 1319 | """Resolves false conflicts where the nodeid changed but the content |
|
1319 | 1320 | remained the same.""" |
|
1320 | 1321 | # We force a copy of actions.items() because we're going to mutate |
|
1321 | 1322 | # actions as we resolve trivial conflicts. |
|
1322 | 1323 | for f, (m, args, msg) in list(actions.items()): |
|
1323 | 1324 | if (m == ACTION_CHANGED_DELETED and f in ancestor |
|
1324 | 1325 | and not wctx[f].cmp(ancestor[f])): |
|
1325 | 1326 | # local did change but ended up with same content |
|
1326 | 1327 | actions[f] = ACTION_REMOVE, None, 'prompt same' |
|
1327 | 1328 | elif (m == ACTION_DELETED_CHANGED and f in ancestor |
|
1328 | 1329 | and not mctx[f].cmp(ancestor[f])): |
|
1329 | 1330 | # remote did change but ended up with same content |
|
1330 | 1331 | del actions[f] # don't get = keep local deleted |
|
1331 | 1332 | |
|
1332 | 1333 | def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, |
|
1333 | 1334 | acceptremote, followcopies, matcher=None, |
|
1334 | 1335 | mergeforce=False): |
|
1335 | 1336 | """Calculate the actions needed to merge mctx into wctx using ancestors""" |
|
1336 | 1337 | # Avoid cycle. |
|
1337 | 1338 | from . import sparse |
|
1338 | 1339 | |
|
1339 | 1340 | if len(ancestors) == 1: # default |
|
1340 | 1341 | actions, diverge, renamedelete = manifestmerge( |
|
1341 | 1342 | repo, wctx, mctx, ancestors[0], branchmerge, force, matcher, |
|
1342 | 1343 | acceptremote, followcopies) |
|
1343 | 1344 | _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce) |
|
1344 | 1345 | |
|
1345 | 1346 | else: # only when merge.preferancestor=* - the default |
|
1346 | 1347 | repo.ui.note( |
|
1347 | 1348 | _("note: merging %s and %s using bids from ancestors %s\n") % |
|
1348 | 1349 | (wctx, mctx, _(' and ').join(pycompat.bytestr(anc) |
|
1349 | 1350 | for anc in ancestors))) |
|
1350 | 1351 | |
|
1351 | 1352 | # Call for bids |
|
1352 | 1353 | fbids = {} # mapping filename to bids (action method to list of actions)
|
1353 | 1354 | diverge, renamedelete = None, None |
|
1354 | 1355 | for ancestor in ancestors: |
|
1355 | 1356 | repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor) |
|
1356 | 1357 | actions, diverge1, renamedelete1 = manifestmerge( |
|
1357 | 1358 | repo, wctx, mctx, ancestor, branchmerge, force, matcher, |
|
1358 | 1359 | acceptremote, followcopies, forcefulldiff=True) |
|
1359 | 1360 | _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce) |
|
1360 | 1361 | |
|
1361 | 1362 | # Track the shortest set of warnings on the theory that bid
|
1362 | 1363 | # merge will correctly incorporate more information |
|
1363 | 1364 | if diverge is None or len(diverge1) < len(diverge): |
|
1364 | 1365 | diverge = diverge1 |
|
1365 | 1366 | if renamedelete is None or len(renamedelete) < len(renamedelete1): |
|
1366 | 1367 | renamedelete = renamedelete1 |
|
1367 | 1368 | |
|
1368 | 1369 | for f, a in sorted(actions.iteritems()): |
|
1369 | 1370 | m, args, msg = a |
|
1370 | 1371 | repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m)) |
|
1371 | 1372 | if f in fbids: |
|
1372 | 1373 | d = fbids[f] |
|
1373 | 1374 | if m in d: |
|
1374 | 1375 | d[m].append(a) |
|
1375 | 1376 | else: |
|
1376 | 1377 | d[m] = [a] |
|
1377 | 1378 | else: |
|
1378 | 1379 | fbids[f] = {m: [a]} |
|
1379 | 1380 | |
|
1380 | 1381 | # Pick the best bid for each file |
|
1381 | 1382 | repo.ui.note(_('\nauction for merging merge bids\n')) |
|
1382 | 1383 | actions = {} |
|
1383 | 1384 | for f, bids in sorted(fbids.items()): |
|
1384 | 1385 | # bids is a mapping from action method to list of actions
|
1385 | 1386 | # Consensus? |
|
1386 | 1387 | if len(bids) == 1: # all bids are the same kind of method |
|
1387 | 1388 | m, l = list(bids.items())[0] |
|
1388 | 1389 | if all(a == l[0] for a in l[1:]): # len(bids) is > 1 |
|
1389 | 1390 | repo.ui.note(_(" %s: consensus for %s\n") % (f, m)) |
|
1390 | 1391 | actions[f] = l[0] |
|
1391 | 1392 | continue |
|
1392 | 1393 | # If keep is an option, just do it. |
|
1393 | 1394 | if ACTION_KEEP in bids: |
|
1394 | 1395 | repo.ui.note(_(" %s: picking 'keep' action\n") % f) |
|
1395 | 1396 | actions[f] = bids[ACTION_KEEP][0] |
|
1396 | 1397 | continue |
|
1397 | 1398 | # If there are gets and they all agree [how could they not?], do it. |
|
1398 | 1399 | if ACTION_GET in bids: |
|
1399 | 1400 | ga0 = bids[ACTION_GET][0] |
|
1400 | 1401 | if all(a == ga0 for a in bids[ACTION_GET][1:]): |
|
1401 | 1402 | repo.ui.note(_(" %s: picking 'get' action\n") % f) |
|
1402 | 1403 | actions[f] = ga0 |
|
1403 | 1404 | continue |
|
1404 | 1405 | # TODO: Consider other simple actions such as mode changes |
|
1405 | 1406 | # Handle inefficient democrazy. |
|
1406 | 1407 | repo.ui.note(_(' %s: multiple bids for merge action:\n') % f) |
|
1407 | 1408 | for m, l in sorted(bids.items()): |
|
1408 | 1409 | for _f, args, msg in l: |
|
1409 | 1410 | repo.ui.note(' %s -> %s\n' % (msg, m)) |
|
1410 | 1411 | # Pick random action. TODO: Instead, prompt user when resolving |
|
1411 | 1412 | m, l = list(bids.items())[0] |
|
1412 | 1413 | repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') % |
|
1413 | 1414 | (f, m)) |
|
1414 | 1415 | actions[f] = l[0] |
|
1415 | 1416 | continue |
|
1416 | 1417 | repo.ui.note(_('end of auction\n\n')) |
|
1417 | 1418 | |
|
1418 | 1419 | if wctx.rev() is None: |
|
1419 | 1420 | fractions = _forgetremoved(wctx, mctx, branchmerge) |
|
1420 | 1421 | actions.update(fractions) |
|
1421 | 1422 | |
|
1422 | 1423 | prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, |
|
1423 | 1424 | actions) |
|
1424 | 1425 | _resolvetrivial(repo, wctx, mctx, ancestors[0], actions) |
|
1425 | 1426 | |
|
1426 | 1427 | return prunedactions, diverge, renamedelete |
|
1427 | 1428 | |
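
Editor's aside (simplified sketch; bytes and ordering details elided): the auction above prefers, in order, a unanimous bid, a 'keep' bid, then a unanimous 'get' bid, and otherwise falls back to an arbitrary bid:

def pickbid(bids):
    # bids: dict mapping action type (e.g. 'k', 'g', 'm') -> list of actions
    if len(bids) == 1:
        [(m, l)] = bids.items()
        if all(a == l[0] for a in l[1:]):
            return l[0]                   # consensus
    if 'k' in bids:
        return bids['k'][0]               # keeping local is always safe
    if 'g' in bids and all(a == bids['g'][0] for a in bids['g'][1:]):
        return bids['g'][0]               # unanimous get
    return sorted(bids.items())[0][1][0]  # ambiguous: pick one
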
|
1428 | 1429 | def _getcwd(): |
|
1429 | 1430 | try: |
|
1430 | 1431 | return encoding.getcwd() |
|
1431 | 1432 | except OSError as err: |
|
1432 | 1433 | if err.errno == errno.ENOENT: |
|
1433 | 1434 | return None |
|
1434 | 1435 | raise |
|
1435 | 1436 | |
|
1436 | 1437 | def batchremove(repo, wctx, actions): |
|
1437 | 1438 | """apply removes to the working directory |
|
1438 | 1439 | |
|
1439 | 1440 | yields tuples for progress updates |
|
1440 | 1441 | """ |
|
1441 | 1442 | verbose = repo.ui.verbose |
|
1442 | 1443 | cwd = _getcwd() |
|
1443 | 1444 | i = 0 |
|
1444 | 1445 | for f, args, msg in actions: |
|
1445 | 1446 | repo.ui.debug(" %s: %s -> r\n" % (f, msg)) |
|
1446 | 1447 | if verbose: |
|
1447 | 1448 | repo.ui.note(_("removing %s\n") % f) |
|
1448 | 1449 | wctx[f].audit() |
|
1449 | 1450 | try: |
|
1450 | 1451 | wctx[f].remove(ignoremissing=True) |
|
1451 | 1452 | except OSError as inst: |
|
1452 | 1453 | repo.ui.warn(_("update failed to remove %s: %s!\n") % |
|
1453 | 1454 | (f, inst.strerror)) |
|
1454 | 1455 | if i == 100: |
|
1455 | 1456 | yield i, f |
|
1456 | 1457 | i = 0 |
|
1457 | 1458 | i += 1 |
|
1458 | 1459 | if i > 0: |
|
1459 | 1460 | yield i, f |
|
1460 | 1461 | |
|
1461 | 1462 | if cwd and not _getcwd(): |
|
1462 | 1463 | # cwd was removed in the course of removing files; print a helpful |
|
1463 | 1464 | # warning. |
|
1464 | 1465 | repo.ui.warn(_("current directory was removed\n" |
|
1465 | 1466 | "(consider changing to repo root: %s)\n") % repo.root) |
|
1466 | 1467 | |
|
1467 | def batchget(repo, mctx, wctx, actions): | |
|
1468 | def batchget(repo, mctx, wctx, wantfiledata, actions): | |
|
1468 | 1469 | """apply gets to the working directory |
|
1469 | 1470 | |
|
1470 | 1471 | mctx is the context to get from |
|
1471 | 1472 | |
|
1472 | yields tuples for progress updates | 
|
1473 | Yields arbitrarily many (False, tuple) for progress updates, followed by | |
|
1474 | exactly one (True, filedata). When wantfiledata is false, filedata is an | |
|
1475 | empty list. When wantfiledata is true, filedata[i] is a triple (mode, size, | |
|
1476 | mtime) of the file written for action[i]. | |
|
1473 | 1477 | """ |
|
1478 | filedata = [] | |
|
1474 | 1479 | verbose = repo.ui.verbose |
|
1475 | 1480 | fctx = mctx.filectx |
|
1476 | 1481 | ui = repo.ui |
|
1477 | 1482 | i = 0 |
|
1478 | 1483 | with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)): |
|
1479 | 1484 | for f, (flags, backup), msg in actions: |
|
1480 | 1485 | repo.ui.debug(" %s: %s -> g\n" % (f, msg)) |
|
1481 | 1486 | if verbose: |
|
1482 | 1487 | repo.ui.note(_("getting %s\n") % f) |
|
1483 | 1488 | |
|
1484 | 1489 | if backup: |
|
1485 | 1490 | # If a file or directory exists with the same name, back that |
|
1486 | 1491 | # up. Otherwise, look to see if there is a file that conflicts |
|
1487 | 1492 | # with a directory this file is in, and if so, back that up. |
|
1488 | 1493 | conflicting = f |
|
1489 | 1494 | if not repo.wvfs.lexists(f): |
|
1490 | 1495 | for p in util.finddirs(f): |
|
1491 | 1496 | if repo.wvfs.isfileorlink(p): |
|
1492 | 1497 | conflicting = p |
|
1493 | 1498 | break |
|
1494 | 1499 | if repo.wvfs.lexists(conflicting): |
|
1495 | 1500 | orig = scmutil.backuppath(ui, repo, conflicting) |
|
1496 | 1501 | util.rename(repo.wjoin(conflicting), orig) |
|
1497 | wctx[f].clearunknown() | 
|
1502 | wfctx = wctx[f] | |
|
1503 | wfctx.clearunknown() | |
|
1498 | 1504 | atomictemp = ui.configbool("experimental", "update.atomic-file") |
|
1499 | wctx[f].write(fctx(f).data(), flags, backgroundclose=True, | 
|
1505 | size = wfctx.write(fctx(f).data(), flags, | |
|
1506 | backgroundclose=True, | |
|
1500 | 1507 | atomictemp=atomictemp) |
|
1508 | if wantfiledata: | |
|
1509 | s = wfctx.lstat() | |
|
1510 | mode = s.st_mode | |
|
1511 | mtime = s[stat.ST_MTIME] | |
|
1512 | filedata.append((mode, size, mtime)) # for dirstate.normal | |
|
1501 | 1513 | if i == 100: |
|
1502 | yield i, f | |
|
1514 | yield False, (i, f) | |
|
1503 | 1515 | i = 0 |
|
1504 | 1516 | i += 1 |
|
1505 | 1517 | if i > 0: |
|
1506 | yield i, f | |
|
1518 | yield False, (i, f) | |
|
1519 | yield True, filedata | |
|
1507 | 1520 | |
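
Editor's aside (hypothetical driver, not in the diff): a consumer of the new two-phase protocol documented in the docstring above drains progress tuples until the final (True, filedata) pair:

def drainbatchget(gen):
    copied = 0
    for done, payload in gen:
        if done:
            return copied, payload   # payload is the filedata list
        i, lastf = payload           # progress: files written since last yield
        copied += i
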
|
1508 | 1521 | def _prefetchfiles(repo, ctx, actions): |
|
1509 | 1522 | """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict |
|
1510 | 1523 | of merge actions. ``ctx`` is the context being merged in.""" |
|
1511 | 1524 | |
|
1512 | 1525 | # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they |
|
1513 | 1526 | # don't touch the context to be merged in. 'cd' is skipped, because |
|
1514 | 1527 | # changed/deleted never resolves to something from the remote side. |
|
1515 | 1528 | oplist = [actions[a] for a in (ACTION_GET, ACTION_DELETED_CHANGED, |
|
1516 | 1529 | ACTION_LOCAL_DIR_RENAME_GET, ACTION_MERGE)] |
|
1517 | 1530 | prefetch = scmutil.prefetchfiles |
|
1518 | 1531 | matchfiles = scmutil.matchfiles |
|
1519 | 1532 | prefetch(repo, [ctx.rev()], |
|
1520 | 1533 | matchfiles(repo, |
|
1521 | 1534 | [f for sublist in oplist for f, args, msg in sublist])) |
|
1522 | 1535 | |
|
1523 | 1536 | @attr.s(frozen=True) |
|
1524 | 1537 | class updateresult(object): |
|
1525 | 1538 | updatedcount = attr.ib() |
|
1526 | 1539 | mergedcount = attr.ib() |
|
1527 | 1540 | removedcount = attr.ib() |
|
1528 | 1541 | unresolvedcount = attr.ib() |
|
1529 | 1542 | |
|
1530 | 1543 | def isempty(self): |
|
1531 | 1544 | return not (self.updatedcount or self.mergedcount |
|
1532 | 1545 | or self.removedcount or self.unresolvedcount) |
|
1533 | 1546 | |
|
1534 | 1547 | def emptyactions(): |
|
1535 | 1548 | """create an actions dict, to be populated and passed to applyupdates()""" |
|
1536 | 1549 | return dict((m, []) |
|
1537 | 1550 | for m in ( |
|
1538 | 1551 | ACTION_ADD, |
|
1539 | 1552 | ACTION_ADD_MODIFIED, |
|
1540 | 1553 | ACTION_FORGET, |
|
1541 | 1554 | ACTION_GET, |
|
1542 | 1555 | ACTION_CHANGED_DELETED, |
|
1543 | 1556 | ACTION_DELETED_CHANGED, |
|
1544 | 1557 | ACTION_REMOVE, |
|
1545 | 1558 | ACTION_DIR_RENAME_MOVE_LOCAL, |
|
1546 | 1559 | ACTION_LOCAL_DIR_RENAME_GET, |
|
1547 | 1560 | ACTION_MERGE, |
|
1548 | 1561 | ACTION_EXEC, |
|
1549 | 1562 | ACTION_KEEP, |
|
1550 | 1563 | ACTION_PATH_CONFLICT, |
|
1551 | 1564 | ACTION_PATH_CONFLICT_RESOLVE)) |
|
1552 | 1565 | |
|
1553 | def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None): | 
|
1566 | def applyupdates(repo, actions, wctx, mctx, overwrite, wantfiledata, | |
|
1567 | labels=None): | |
|
1554 | 1568 | """apply the merge action list to the working directory |
|
1555 | 1569 | |
|
1556 | 1570 | wctx is the working copy context |
|
1557 | 1571 | mctx is the context to be merged into the working copy |
|
1558 | 1572 | |
|
1559 | Return a tuple of counts (updated, merged, removed, unresolved) that | |
|
1560 | describes how many files were affected by the update. | |
|
1573 | Return a tuple of (counts, filedata), where counts is a tuple | |
|
1574 | (updated, merged, removed, unresolved) that describes how many | |
|
1575 | files were affected by the update, and filedata is as described in | |
|
1576 | batchget. | |
|
1561 | 1577 | """ |
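As a hypothetical call site (not part of this patch; `repo`, `actions`, `wctx` and `mctx` stand for values prepared as in `update()` further down), the new return shape unpacks like this:

```python
# illustration only -- assumes a populated actions dict from emptyactions()
stats, getfiledata = applyupdates(repo, actions, wctx, mctx, overwrite=False,
                                  wantfiledata=True, labels=None)
# when wantfiledata is True, the two lists are parallel (see the assert at
# the end of applyupdates())
assert len(getfiledata) == len(actions[ACTION_GET])
```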
|
1562 | 1578 | |
|
1563 | 1579 | _prefetchfiles(repo, mctx, actions) |
|
1564 | 1580 | |
|
1565 | 1581 | updated, merged, removed = 0, 0, 0 |
|
1566 | 1582 | ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels) |
|
1567 | 1583 | moves = [] |
|
1568 | 1584 | for m, l in actions.items(): |
|
1569 | 1585 | l.sort() |
|
1570 | 1586 | |
|
1571 | 1587 | # 'cd' and 'dc' actions are treated like other merge conflicts |
|
1572 | 1588 | mergeactions = sorted(actions[ACTION_CHANGED_DELETED]) |
|
1573 | 1589 | mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED])) |
|
1574 | 1590 | mergeactions.extend(actions[ACTION_MERGE]) |
|
1575 | 1591 | for f, args, msg in mergeactions: |
|
1576 | 1592 | f1, f2, fa, move, anc = args |
|
1577 | 1593 | if f == '.hgsubstate': # merged internally |
|
1578 | 1594 | continue |
|
1579 | 1595 | if f1 is None: |
|
1580 | 1596 | fcl = filemerge.absentfilectx(wctx, fa) |
|
1581 | 1597 | else: |
|
1582 | 1598 | repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f)) |
|
1583 | 1599 | fcl = wctx[f1] |
|
1584 | 1600 | if f2 is None: |
|
1585 | 1601 | fco = filemerge.absentfilectx(mctx, fa) |
|
1586 | 1602 | else: |
|
1587 | 1603 | fco = mctx[f2] |
|
1588 | 1604 | actx = repo[anc] |
|
1589 | 1605 | if fa in actx: |
|
1590 | 1606 | fca = actx[fa] |
|
1591 | 1607 | else: |
|
1592 | 1608 | # TODO: move to absentfilectx |
|
1593 | 1609 | fca = repo.filectx(f1, fileid=nullrev) |
|
1594 | 1610 | ms.add(fcl, fco, fca, f) |
|
1595 | 1611 | if f1 != f and move: |
|
1596 | 1612 | moves.append(f1) |
|
1597 | 1613 | |
|
1598 | 1614 | # remove renamed files after safely stored |
|
1599 | 1615 | for f in moves: |
|
1600 | 1616 | if wctx[f].lexists(): |
|
1601 | 1617 | repo.ui.debug("removing %s\n" % f) |
|
1602 | 1618 | wctx[f].audit() |
|
1603 | 1619 | wctx[f].remove() |
|
1604 | 1620 | |
|
1605 | 1621 | numupdates = sum(len(l) for m, l in actions.items() |
|
1606 | 1622 | if m != ACTION_KEEP) |
|
1607 | 1623 | progress = repo.ui.makeprogress(_('updating'), unit=_('files'), |
|
1608 | 1624 | total=numupdates) |
|
1609 | 1625 | |
|
1610 | 1626 | if [a for a in actions[ACTION_REMOVE] if a[0] == '.hgsubstate']: |
|
1611 | 1627 | subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) |
|
1612 | 1628 | |
|
1613 | 1629 | # record path conflicts |
|
1614 | 1630 | for f, args, msg in actions[ACTION_PATH_CONFLICT]: |
|
1615 | 1631 | f1, fo = args |
|
1616 | 1632 | s = repo.ui.status |
|
1617 | 1633 | s(_("%s: path conflict - a file or link has the same name as a " |
|
1618 | 1634 | "directory\n") % f) |
|
1619 | 1635 | if fo == 'l': |
|
1620 | 1636 | s(_("the local file has been renamed to %s\n") % f1) |
|
1621 | 1637 | else: |
|
1622 | 1638 | s(_("the remote file has been renamed to %s\n") % f1) |
|
1623 | 1639 | s(_("resolve manually then use 'hg resolve --mark %s'\n") % f) |
|
1624 | 1640 | ms.addpath(f, f1, fo) |
|
1625 | 1641 | progress.increment(item=f) |
|
1626 | 1642 | |
|
1627 | 1643 | # When merging in-memory, we can't support worker processes, so set the |
|
1628 | 1644 | # per-item cost at 0 in that case. |
|
1629 | 1645 | cost = 0 if wctx.isinmemory() else 0.001 |
|
1630 | 1646 | |
|
1631 | 1647 | # remove in parallel (must come before resolving path conflicts and getting) |
|
1632 | 1648 | prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx), |
|
1633 | 1649 | actions[ACTION_REMOVE]) |
|
1634 | 1650 | for i, item in prog: |
|
1635 | 1651 | progress.increment(step=i, item=item) |
|
1636 | 1652 | removed = len(actions[ACTION_REMOVE]) |
|
1637 | 1653 | |
|
1638 | 1654 | # resolve path conflicts (must come before getting) |
|
1639 | 1655 | for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]: |
|
1640 | 1656 | repo.ui.debug(" %s: %s -> pr\n" % (f, msg)) |
|
1641 | 1657 | f0, = args |
|
1642 | 1658 | if wctx[f0].lexists(): |
|
1643 | 1659 | repo.ui.note(_("moving %s to %s\n") % (f0, f)) |
|
1644 | 1660 | wctx[f].audit() |
|
1645 | 1661 | wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags()) |
|
1646 | 1662 | wctx[f0].remove() |
|
1647 | 1663 | progress.increment(item=f) |
|
1648 | 1664 | |
|
1649 | 1665 | # get in parallel. |
|
1650 | 1666 | threadsafe = repo.ui.configbool('experimental', |
|
1651 | 1667 | 'worker.wdir-get-thread-safe') |
|
1652 | prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx), | 
|
1668 | prog = worker.worker(repo.ui, cost, batchget, | |
|
1669 | (repo, mctx, wctx, wantfiledata), | |
|
1653 | 1670 | actions[ACTION_GET], |
|
1654 | threadsafe=threadsafe) | 
|
1655 | for i, item in prog: | |
|
1671 | threadsafe=threadsafe, | |
|
1672 | hasretval=True) | |
|
1673 | getfiledata = [] | |
|
1674 | for final, res in prog: | |
|
1675 | if final: | |
|
1676 | getfiledata = res | |
|
1677 | else: | |
|
1678 | i, item = res | |
|
1656 | 1679 | progress.increment(step=i, item=item) |
|
1657 | 1680 | updated = len(actions[ACTION_GET]) |
|
1658 | 1681 | |
|
1659 | 1682 | if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']: |
|
1660 | 1683 | subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) |
|
1661 | 1684 | |
|
1662 | 1685 | # forget (manifest only, just log it) (must come first) |
|
1663 | 1686 | for f, args, msg in actions[ACTION_FORGET]: |
|
1664 | 1687 | repo.ui.debug(" %s: %s -> f\n" % (f, msg)) |
|
1665 | 1688 | progress.increment(item=f) |
|
1666 | 1689 | |
|
1667 | 1690 | # re-add (manifest only, just log it) |
|
1668 | 1691 | for f, args, msg in actions[ACTION_ADD]: |
|
1669 | 1692 | repo.ui.debug(" %s: %s -> a\n" % (f, msg)) |
|
1670 | 1693 | progress.increment(item=f) |
|
1671 | 1694 | |
|
1672 | 1695 | # re-add/mark as modified (manifest only, just log it) |
|
1673 | 1696 | for f, args, msg in actions[ACTION_ADD_MODIFIED]: |
|
1674 | 1697 | repo.ui.debug(" %s: %s -> am\n" % (f, msg)) |
|
1675 | 1698 | progress.increment(item=f) |
|
1676 | 1699 | |
|
1677 | 1700 | # keep (noop, just log it) |
|
1678 | 1701 | for f, args, msg in actions[ACTION_KEEP]: |
|
1679 | 1702 | repo.ui.debug(" %s: %s -> k\n" % (f, msg)) |
|
1680 | 1703 | # no progress |
|
1681 | 1704 | |
|
1682 | 1705 | # directory rename, move local |
|
1683 | 1706 | for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]: |
|
1684 | 1707 | repo.ui.debug(" %s: %s -> dm\n" % (f, msg)) |
|
1685 | 1708 | progress.increment(item=f) |
|
1686 | 1709 | f0, flags = args |
|
1687 | 1710 | repo.ui.note(_("moving %s to %s\n") % (f0, f)) |
|
1688 | 1711 | wctx[f].audit() |
|
1689 | 1712 | wctx[f].write(wctx.filectx(f0).data(), flags) |
|
1690 | 1713 | wctx[f0].remove() |
|
1691 | 1714 | updated += 1 |
|
1692 | 1715 | |
|
1693 | 1716 | # local directory rename, get |
|
1694 | 1717 | for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]: |
|
1695 | 1718 | repo.ui.debug(" %s: %s -> dg\n" % (f, msg)) |
|
1696 | 1719 | progress.increment(item=f) |
|
1697 | 1720 | f0, flags = args |
|
1698 | 1721 | repo.ui.note(_("getting %s to %s\n") % (f0, f)) |
|
1699 | 1722 | wctx[f].write(mctx.filectx(f0).data(), flags) |
|
1700 | 1723 | updated += 1 |
|
1701 | 1724 | |
|
1702 | 1725 | # exec |
|
1703 | 1726 | for f, args, msg in actions[ACTION_EXEC]: |
|
1704 | 1727 | repo.ui.debug(" %s: %s -> e\n" % (f, msg)) |
|
1705 | 1728 | progress.increment(item=f) |
|
1706 | 1729 | flags, = args |
|
1707 | 1730 | wctx[f].audit() |
|
1708 | 1731 | wctx[f].setflags('l' in flags, 'x' in flags) |
|
1709 | 1732 | updated += 1 |
|
1710 | 1733 | |
|
1711 | 1734 | # the ordering is important here -- ms.mergedriver will raise if the merge |
|
1712 | 1735 | # driver has changed, and we want to be able to bypass it when overwrite is |
|
1713 | 1736 | # True |
|
1714 | 1737 | usemergedriver = not overwrite and mergeactions and ms.mergedriver |
|
1715 | 1738 | |
|
1716 | 1739 | if usemergedriver: |
|
1717 | 1740 | if wctx.isinmemory(): |
|
1718 | 1741 | raise error.InMemoryMergeConflictsError("in-memory merge does not " |
|
1719 | 1742 | "support mergedriver") |
|
1720 | 1743 | ms.commit() |
|
1721 | 1744 | proceed = driverpreprocess(repo, ms, wctx, labels=labels) |
|
1722 | 1745 | # the driver might leave some files unresolved |
|
1723 | 1746 | unresolvedf = set(ms.unresolved()) |
|
1724 | 1747 | if not proceed: |
|
1725 | 1748 | # XXX setting unresolved to at least 1 is a hack to make sure we |
|
1726 | 1749 | # error out |
|
1727 | 1750 | return updateresult(updated, merged, removed, |
|
1728 | 1751 | max(len(unresolvedf), 1)) |
|
1729 | 1752 | newactions = [] |
|
1730 | 1753 | for f, args, msg in mergeactions: |
|
1731 | 1754 | if f in unresolvedf: |
|
1732 | 1755 | newactions.append((f, args, msg)) |
|
1733 | 1756 | mergeactions = newactions |
|
1734 | 1757 | |
|
1735 | 1758 | try: |
|
1736 | 1759 | # premerge |
|
1737 | 1760 | tocomplete = [] |
|
1738 | 1761 | for f, args, msg in mergeactions: |
|
1739 | 1762 | repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg)) |
|
1740 | 1763 | progress.increment(item=f) |
|
1741 | 1764 | if f == '.hgsubstate': # subrepo states need updating |
|
1742 | 1765 | subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx), |
|
1743 | 1766 | overwrite, labels) |
|
1744 | 1767 | continue |
|
1745 | 1768 | wctx[f].audit() |
|
1746 | 1769 | complete, r = ms.preresolve(f, wctx) |
|
1747 | 1770 | if not complete: |
|
1748 | 1771 | numupdates += 1 |
|
1749 | 1772 | tocomplete.append((f, args, msg)) |
|
1750 | 1773 | |
|
1751 | 1774 | # merge |
|
1752 | 1775 | for f, args, msg in tocomplete: |
|
1753 | 1776 | repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg)) |
|
1754 | 1777 | progress.increment(item=f, total=numupdates) |
|
1755 | 1778 | ms.resolve(f, wctx) |
|
1756 | 1779 | |
|
1757 | 1780 | finally: |
|
1758 | 1781 | ms.commit() |
|
1759 | 1782 | |
|
1760 | 1783 | unresolved = ms.unresolvedcount() |
|
1761 | 1784 | |
|
1762 | 1785 | if (usemergedriver and not unresolved |
|
1763 | 1786 | and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS): |
|
1764 | 1787 | if not driverconclude(repo, ms, wctx, labels=labels): |
|
1765 | 1788 | # XXX setting unresolved to at least 1 is a hack to make sure we |
|
1766 | 1789 | # error out |
|
1767 | 1790 | unresolved = max(unresolved, 1) |
|
1768 | 1791 | |
|
1769 | 1792 | ms.commit() |
|
1770 | 1793 | |
|
1771 | 1794 | msupdated, msmerged, msremoved = ms.counts() |
|
1772 | 1795 | updated += msupdated |
|
1773 | 1796 | merged += msmerged |
|
1774 | 1797 | removed += msremoved |
|
1775 | 1798 | |
|
1776 | 1799 | extraactions = ms.actions() |
|
1777 | 1800 | if extraactions: |
|
1778 | 1801 | mfiles = set(a[0] for a in actions[ACTION_MERGE]) |
|
1779 | 1802 | for k, acts in extraactions.iteritems(): |
|
1780 | 1803 | actions[k].extend(acts) |
|
1804 | if k == ACTION_GET and wantfiledata: | |
|
1805 | # no filedata until mergestate is updated to provide it | |
|
1806 | getfiledata.extend([None] * len(acts)) | |
|
1781 | 1807 | # Remove these files from actions[ACTION_MERGE] as well. This is |
|
1782 | 1808 | # important because in recordupdates, files in actions[ACTION_MERGE] |
|
1783 | 1809 | # are processed after files in other actions, and the merge driver |
|
1784 | 1810 | # might add files to those actions via extraactions above. This can |
|
1785 | 1811 | # lead to a file being recorded twice, with poor results. This is |
|
1786 | 1812 | # especially problematic for actions[ACTION_REMOVE] (currently only |
|
1787 | 1813 | # possible with the merge driver in the initial merge process; |
|
1788 | 1814 | # interrupted merges don't go through this flow). |
|
1789 | 1815 | # |
|
1790 | 1816 | # The real fix here is to have indexes by both file and action so |
|
1791 | 1817 | # that when the action for a file is changed it is automatically |
|
1792 | 1818 | # reflected in the other action lists. But that involves a more |
|
1793 | 1819 | # complex data structure, so this will do for now. |
|
1794 | 1820 | # |
|
1795 | 1821 | # We don't need to do the same operation for 'dc' and 'cd' because |
|
1796 | 1822 | # those lists aren't consulted again. |
|
1797 | 1823 | mfiles.difference_update(a[0] for a in acts) |
|
1798 | 1824 | |
|
1799 | 1825 | actions[ACTION_MERGE] = [a for a in actions[ACTION_MERGE] |
|
1800 | 1826 | if a[0] in mfiles] |
|
1801 | 1827 | |
|
1802 | 1828 | progress.complete() |
|
1803 | return updateresult(updated, merged, removed, unresolved) | |
|
1829 | assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0) | |
|
1830 | return updateresult(updated, merged, removed, unresolved), getfiledata | |
|
1804 | 1831 | |
|
1805 | def recordupdates(repo, actions, branchmerge): | |
|
1832 | def recordupdates(repo, actions, branchmerge, getfiledata): | |
|
1806 | 1833 | "record merge actions to the dirstate" |
|
1807 | 1834 | # remove (must come first) |
|
1808 | 1835 | for f, args, msg in actions.get(ACTION_REMOVE, []): |
|
1809 | 1836 | if branchmerge: |
|
1810 | 1837 | repo.dirstate.remove(f) |
|
1811 | 1838 | else: |
|
1812 | 1839 | repo.dirstate.drop(f) |
|
1813 | 1840 | |
|
1814 | 1841 | # forget (must come first) |
|
1815 | 1842 | for f, args, msg in actions.get(ACTION_FORGET, []): |
|
1816 | 1843 | repo.dirstate.drop(f) |
|
1817 | 1844 | |
|
1818 | 1845 | # resolve path conflicts |
|
1819 | 1846 | for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []): |
|
1820 | 1847 | f0, = args |
|
1821 | 1848 | origf0 = repo.dirstate.copied(f0) or f0 |
|
1822 | 1849 | repo.dirstate.add(f) |
|
1823 | 1850 | repo.dirstate.copy(origf0, f) |
|
1824 | 1851 | if f0 == origf0: |
|
1825 | 1852 | repo.dirstate.remove(f0) |
|
1826 | 1853 | else: |
|
1827 | 1854 | repo.dirstate.drop(f0) |
|
1828 | 1855 | |
|
1829 | 1856 | # re-add |
|
1830 | 1857 | for f, args, msg in actions.get(ACTION_ADD, []): |
|
1831 | 1858 | repo.dirstate.add(f) |
|
1832 | 1859 | |
|
1833 | 1860 | # re-add/mark as modified |
|
1834 | 1861 | for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []): |
|
1835 | 1862 | if branchmerge: |
|
1836 | 1863 | repo.dirstate.normallookup(f) |
|
1837 | 1864 | else: |
|
1838 | 1865 | repo.dirstate.add(f) |
|
1839 | 1866 | |
|
1840 | 1867 | # exec change |
|
1841 | 1868 | for f, args, msg in actions.get(ACTION_EXEC, []): |
|
1842 | 1869 | repo.dirstate.normallookup(f) |
|
1843 | 1870 | |
|
1844 | 1871 | # keep |
|
1845 | 1872 | for f, args, msg in actions.get(ACTION_KEEP, []): |
|
1846 | 1873 | pass |
|
1847 | 1874 | |
|
1848 | 1875 | # get |
|
1849 | for f, args, msg in actions.get(ACTION_GET, []): | |
|
1876 | for i, (f, args, msg) in enumerate(actions.get(ACTION_GET, [])): | |
|
1850 | 1877 | if branchmerge: |
|
1851 | 1878 | repo.dirstate.otherparent(f) |
|
1852 | 1879 | else: |
|
1853 | repo.dirstate.normal(f) | |
|
1880 | parentfiledata = getfiledata[i] if getfiledata else None | |
|
1881 | repo.dirstate.normal(f, parentfiledata=parentfiledata) | |
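A small self-contained illustration, with made-up file names and stat values, of the index alignment this loop relies on: `getfiledata`, when non-empty, is parallel to `actions[ACTION_GET]`.

```python
actions_get = [('a.txt', None, 'remote created'),
               ('b.txt', None, 'remote created')]
getfiledata = [(0o100644, 12, 1563000000.0),  # mode, size, mtime for a.txt
               (0o100644, 7, 1563000001.0)]   # ... and for b.txt
for i, (f, args, msg) in enumerate(actions_get):
    parentfiledata = getfiledata[i] if getfiledata else None
    print(f, parentfiledata)
```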
|
1854 | 1882 | |
|
1855 | 1883 | # merge |
|
1856 | 1884 | for f, args, msg in actions.get(ACTION_MERGE, []): |
|
1857 | 1885 | f1, f2, fa, move, anc = args |
|
1858 | 1886 | if branchmerge: |
|
1859 | 1887 | # We've done a branch merge, mark this file as merged |
|
1860 | 1888 | # so that we properly record the merger later |
|
1861 | 1889 | repo.dirstate.merge(f) |
|
1862 | 1890 | if f1 != f2: # copy/rename |
|
1863 | 1891 | if move: |
|
1864 | 1892 | repo.dirstate.remove(f1) |
|
1865 | 1893 | if f1 != f: |
|
1866 | 1894 | repo.dirstate.copy(f1, f) |
|
1867 | 1895 | else: |
|
1868 | 1896 | repo.dirstate.copy(f2, f) |
|
1869 | 1897 | else: |
|
1870 | 1898 | # We've update-merged a locally modified file, so |
|
1871 | 1899 | # we set the dirstate to emulate a normal checkout |
|
1872 | 1900 | # of that file some time in the past. Thus our |
|
1873 | 1901 | # merge will appear as a normal local file |
|
1874 | 1902 | # modification. |
|
1875 | 1903 | if f2 == f: # file not locally copied/moved |
|
1876 | 1904 | repo.dirstate.normallookup(f) |
|
1877 | 1905 | if move: |
|
1878 | 1906 | repo.dirstate.drop(f1) |
|
1879 | 1907 | |
|
1880 | 1908 | # directory rename, move local |
|
1881 | 1909 | for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []): |
|
1882 | 1910 | f0, flag = args |
|
1883 | 1911 | if branchmerge: |
|
1884 | 1912 | repo.dirstate.add(f) |
|
1885 | 1913 | repo.dirstate.remove(f0) |
|
1886 | 1914 | repo.dirstate.copy(f0, f) |
|
1887 | 1915 | else: |
|
1888 | 1916 | repo.dirstate.normal(f) |
|
1889 | 1917 | repo.dirstate.drop(f0) |
|
1890 | 1918 | |
|
1891 | 1919 | # directory rename, get |
|
1892 | 1920 | for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []): |
|
1893 | 1921 | f0, flag = args |
|
1894 | 1922 | if branchmerge: |
|
1895 | 1923 | repo.dirstate.add(f) |
|
1896 | 1924 | repo.dirstate.copy(f0, f) |
|
1897 | 1925 | else: |
|
1898 | 1926 | repo.dirstate.normal(f) |
|
1899 | 1927 | |
|
1900 | 1928 | def update(repo, node, branchmerge, force, ancestor=None, |
|
1901 | 1929 | mergeancestor=False, labels=None, matcher=None, mergeforce=False, |
|
1902 | 1930 | updatecheck=None, wc=None): |
|
1903 | 1931 | """ |
|
1904 | 1932 | Perform a merge between the working directory and the given node |
|
1905 | 1933 | |
|
1906 | 1934 | node = the node to update to |
|
1907 | 1935 | branchmerge = whether to merge between branches |
|
1908 | 1936 | force = whether to force branch merging or file overwriting |
|
1909 | 1937 | matcher = a matcher to filter file lists (dirstate not updated) |
|
1910 | 1938 | mergeancestor = whether it is merging with an ancestor. If true, |
|
1911 | 1939 | we should accept the incoming changes for any prompts that occur. |
|
1912 | 1940 | If false, merging with an ancestor (fast-forward) is only allowed |
|
1913 | 1941 | between different named branches. This flag is used by rebase extension |
|
1914 | 1942 | as a temporary fix and should be avoided in general. |
|
1915 | 1943 | labels = labels to use for base, local and other |
|
1916 | 1944 | mergeforce = whether the merge was run with 'merge --force' (deprecated): if |
|
1917 | 1945 | this is True, then 'force' should be True as well. |
|
1918 | 1946 | |
|
1919 | 1947 | The table below shows all the behaviors of the update command given the |
|
1920 | 1948 | -c/--check and -C/--clean or no options, whether the working directory is |
|
1921 | 1949 | dirty, whether a revision is specified, and the relationship of the parent |
|
1922 | 1950 | rev to the target rev (linear or not). Match from top first. The -n |
|
1923 | 1951 | option doesn't exist on the command line, but represents the |
|
1924 | 1952 | experimental.updatecheck=noconflict option. |
|
1925 | 1953 | |
|
1926 | 1954 | This logic is tested by test-update-branches.t. |
|
1927 | 1955 | |
|
1928 | 1956 | -c -C -n -m dirty rev linear | result |
|
1929 | 1957 | y y * * * * * | (1) |
|
1930 | 1958 | y * y * * * * | (1) |
|
1931 | 1959 | y * * y * * * | (1) |
|
1932 | 1960 | * y y * * * * | (1) |
|
1933 | 1961 | * y * y * * * | (1) |
|
1934 | 1962 | * * y y * * * | (1) |
|
1935 | 1963 | * * * * * n n | x |
|
1936 | 1964 | * * * * n * * | ok |
|
1937 | 1965 | n n n n y * y | merge |
|
1938 | 1966 | n n n n y y n | (2) |
|
1939 | 1967 | n n n y y * * | merge |
|
1940 | 1968 | n n y n y * * | merge if no conflict |
|
1941 | 1969 | n y n n y * * | discard |
|
1942 | 1970 | y n n n y * * | (3) |
|
1943 | 1971 | |
|
1944 | 1972 | x = can't happen |
|
1945 | 1973 | * = don't-care |
|
1946 | 1974 | 1 = incompatible options (checked in commands.py) |
|
1947 | 1975 | 2 = abort: uncommitted changes (commit or update --clean to discard changes) |
|
1948 | 1976 | 3 = abort: uncommitted changes (checked in commands.py) |
|
1949 | 1977 | |
|
1950 | 1978 | The merge is performed inside ``wc``, a workingctx-like object. It defaults
|
1951 | 1979 | to repo[None] if None is passed. |
|
1952 | 1980 | |
|
1953 | 1981 | Return the stats object from applyupdates() (the filedata part is consumed internally).
|
1954 | 1982 | """ |
|
1955 | 1983 | # Avoid cycle. |
|
1956 | 1984 | from . import sparse |
|
1957 | 1985 | |
|
1958 | 1986 | # This function used to find the default destination if node was None, but |
|
1959 | 1987 | # that's now in destutil.py. |
|
1960 | 1988 | assert node is not None |
|
1961 | 1989 | if not branchmerge and not force: |
|
1962 | 1990 | # TODO: remove the default once all callers that pass branchmerge=False |
|
1963 | 1991 | # and force=False pass a value for updatecheck. We may want to allow |
|
1964 | 1992 | # updatecheck='abort' to better support some of these callers.
|
1965 | 1993 | if updatecheck is None: |
|
1966 | 1994 | updatecheck = 'linear' |
|
1967 | 1995 | assert updatecheck in ('none', 'linear', 'noconflict') |
|
1968 | 1996 | # If we're doing a partial update, we need to skip updating |
|
1969 | 1997 | # the dirstate, so make a note of any partial-ness to the |
|
1970 | 1998 | # update here. |
|
1971 | 1999 | if matcher is None or matcher.always(): |
|
1972 | 2000 | partial = False |
|
1973 | 2001 | else: |
|
1974 | 2002 | partial = True |
|
1975 | 2003 | with repo.wlock(): |
|
1976 | 2004 | if wc is None: |
|
1977 | 2005 | wc = repo[None] |
|
1978 | 2006 | pl = wc.parents() |
|
1979 | 2007 | p1 = pl[0] |
|
1980 | 2008 | p2 = repo[node] |
|
1981 | 2009 | if ancestor is not None: |
|
1982 | 2010 | pas = [repo[ancestor]] |
|
1983 | 2011 | else: |
|
1984 | 2012 | if repo.ui.configlist('merge', 'preferancestor') == ['*']: |
|
1985 | 2013 | cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node()) |
|
1986 | 2014 | pas = [repo[anc] for anc in (sorted(cahs) or [nullid])] |
|
1987 | 2015 | else: |
|
1988 | 2016 | pas = [p1.ancestor(p2, warn=branchmerge)] |
|
1989 | 2017 | |
|
1990 | 2018 | fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2) |
|
1991 | 2019 | |
|
1992 | 2020 | overwrite = force and not branchmerge |
|
1993 | 2021 | ### check phase |
|
1994 | 2022 | if not overwrite: |
|
1995 | 2023 | if len(pl) > 1: |
|
1996 | 2024 | raise error.Abort(_("outstanding uncommitted merge")) |
|
1997 | 2025 | ms = mergestate.read(repo) |
|
1998 | 2026 | if list(ms.unresolved()): |
|
1999 | 2027 | raise error.Abort(_("outstanding merge conflicts")) |
|
2000 | 2028 | if branchmerge: |
|
2001 | 2029 | if pas == [p2]: |
|
2002 | 2030 | raise error.Abort(_("merging with a working directory ancestor" |
|
2003 | 2031 | " has no effect")) |
|
2004 | 2032 | elif pas == [p1]: |
|
2005 | 2033 | if not mergeancestor and wc.branch() == p2.branch(): |
|
2006 | 2034 | raise error.Abort(_("nothing to merge"), |
|
2007 | 2035 | hint=_("use 'hg update' " |
|
2008 | 2036 | "or check 'hg heads'")) |
|
2009 | 2037 | if not force and (wc.files() or wc.deleted()): |
|
2010 | 2038 | raise error.Abort(_("uncommitted changes"), |
|
2011 | 2039 | hint=_("use 'hg status' to list changes")) |
|
2012 | 2040 | if not wc.isinmemory(): |
|
2013 | 2041 | for s in sorted(wc.substate): |
|
2014 | 2042 | wc.sub(s).bailifchanged() |
|
2015 | 2043 | |
|
2016 | 2044 | elif not overwrite: |
|
2017 | 2045 | if p1 == p2: # no-op update |
|
2018 | 2046 | # call the hooks and exit early |
|
2019 | 2047 | repo.hook('preupdate', throw=True, parent1=xp2, parent2='') |
|
2020 | 2048 | repo.hook('update', parent1=xp2, parent2='', error=0) |
|
2021 | 2049 | return updateresult(0, 0, 0, 0) |
|
2022 | 2050 | |
|
2023 | 2051 | if (updatecheck == 'linear' and |
|
2024 | 2052 | pas not in ([p1], [p2])): # nonlinear |
|
2025 | 2053 | dirty = wc.dirty(missing=True) |
|
2026 | 2054 | if dirty: |
|
2027 | 2055 | # The branching here is a bit strange, to ensure we make the minimal

2028 | 2056 | # number of calls to obsutil.foreground.
|
2029 | 2057 | foreground = obsutil.foreground(repo, [p1.node()]) |
|
2030 | 2058 | # note: the <node> variable contains a random identifier |
|
2031 | 2059 | if repo[node].node() in foreground: |
|
2032 | 2060 | pass # allow updating to successors |
|
2033 | 2061 | else: |
|
2034 | 2062 | msg = _("uncommitted changes") |
|
2035 | 2063 | hint = _("commit or update --clean to discard changes") |
|
2036 | 2064 | raise error.UpdateAbort(msg, hint=hint) |
|
2037 | 2065 | else: |
|
2038 | 2066 | # Allow jumping branches if clean and specific rev given |
|
2039 | 2067 | pass |
|
2040 | 2068 | |
|
2041 | 2069 | if overwrite: |
|
2042 | 2070 | pas = [wc] |
|
2043 | 2071 | elif not branchmerge: |
|
2044 | 2072 | pas = [p1] |
|
2045 | 2073 | |
|
2046 | 2074 | # deprecated config: merge.followcopies |
|
2047 | 2075 | followcopies = repo.ui.configbool('merge', 'followcopies') |
|
2048 | 2076 | if overwrite: |
|
2049 | 2077 | followcopies = False |
|
2050 | 2078 | elif not pas[0]: |
|
2051 | 2079 | followcopies = False |
|
2052 | 2080 | if not branchmerge and not wc.dirty(missing=True): |
|
2053 | 2081 | followcopies = False |
|
2054 | 2082 | |
|
2055 | 2083 | ### calculate phase |
|
2056 | 2084 | actionbyfile, diverge, renamedelete = calculateupdates( |
|
2057 | 2085 | repo, wc, p2, pas, branchmerge, force, mergeancestor, |
|
2058 | 2086 | followcopies, matcher=matcher, mergeforce=mergeforce) |
|
2059 | 2087 | |
|
2060 | 2088 | if updatecheck == 'noconflict': |
|
2061 | 2089 | for f, (m, args, msg) in actionbyfile.iteritems(): |
|
2062 | 2090 | if m not in (ACTION_GET, ACTION_KEEP, ACTION_EXEC, |
|
2063 | 2091 | ACTION_REMOVE, ACTION_PATH_CONFLICT_RESOLVE): |
|
2064 | 2092 | msg = _("conflicting changes") |
|
2065 | 2093 | hint = _("commit or update --clean to discard changes") |
|
2066 | 2094 | raise error.Abort(msg, hint=hint) |
|
2067 | 2095 | |
|
2068 | 2096 | # Prompt and create actions. Most of this is in the resolve phase |
|
2069 | 2097 | # already, but we can't handle .hgsubstate in filemerge or |
|
2070 | 2098 | # subrepoutil.submerge yet so we have to keep prompting for it. |
|
2071 | 2099 | if '.hgsubstate' in actionbyfile: |
|
2072 | 2100 | f = '.hgsubstate' |
|
2073 | 2101 | m, args, msg = actionbyfile[f] |
|
2074 | 2102 | prompts = filemerge.partextras(labels) |
|
2075 | 2103 | prompts['f'] = f |
|
2076 | 2104 | if m == ACTION_CHANGED_DELETED: |
|
2077 | 2105 | if repo.ui.promptchoice( |
|
2078 | 2106 | _("local%(l)s changed %(f)s which other%(o)s deleted\n" |
|
2079 | 2107 | "use (c)hanged version or (d)elete?" |
|
2080 | 2108 | "$$ &Changed $$ &Delete") % prompts, 0): |
|
2081 | 2109 | actionbyfile[f] = (ACTION_REMOVE, None, 'prompt delete') |
|
2082 | 2110 | elif f in p1: |
|
2083 | 2111 | actionbyfile[f] = (ACTION_ADD_MODIFIED, None, 'prompt keep') |
|
2084 | 2112 | else: |
|
2085 | 2113 | actionbyfile[f] = (ACTION_ADD, None, 'prompt keep') |
|
2086 | 2114 | elif m == ACTION_DELETED_CHANGED: |
|
2087 | 2115 | f1, f2, fa, move, anc = args |
|
2088 | 2116 | flags = p2[f2].flags() |
|
2089 | 2117 | if repo.ui.promptchoice( |
|
2090 | 2118 | _("other%(o)s changed %(f)s which local%(l)s deleted\n" |
|
2091 | 2119 | "use (c)hanged version or leave (d)eleted?" |
|
2092 | 2120 | "$$ &Changed $$ &Deleted") % prompts, 0) == 0: |
|
2093 | 2121 | actionbyfile[f] = (ACTION_GET, (flags, False), |
|
2094 | 2122 | 'prompt recreating') |
|
2095 | 2123 | else: |
|
2096 | 2124 | del actionbyfile[f] |
|
2097 | 2125 | |
|
2098 | 2126 | # Convert to dictionary-of-lists format |
|
2099 | 2127 | actions = emptyactions() |
|
2100 | 2128 | for f, (m, args, msg) in actionbyfile.iteritems(): |
|
2101 | 2129 | if m not in actions: |
|
2102 | 2130 | actions[m] = [] |
|
2103 | 2131 | actions[m].append((f, args, msg)) |
|
2104 | 2132 | |
|
2105 | 2133 | if not util.fscasesensitive(repo.path): |
|
2106 | 2134 | # check collision between files only in p2 for clean update |
|
2107 | 2135 | if (not branchmerge and |
|
2108 | 2136 | (force or not wc.dirty(missing=True, branch=False))): |
|
2109 | 2137 | _checkcollision(repo, p2.manifest(), None) |
|
2110 | 2138 | else: |
|
2111 | 2139 | _checkcollision(repo, wc.manifest(), actions) |
|
2112 | 2140 | |
|
2113 | 2141 | # divergent renames |
|
2114 | 2142 | for f, fl in sorted(diverge.iteritems()): |
|
2115 | 2143 | repo.ui.warn(_("note: possible conflict - %s was renamed " |
|
2116 | 2144 | "multiple times to:\n") % f) |
|
2117 | 2145 | for nf in sorted(fl): |
|
2118 | 2146 | repo.ui.warn(" %s\n" % nf) |
|
2119 | 2147 | |
|
2120 | 2148 | # rename and delete |
|
2121 | 2149 | for f, fl in sorted(renamedelete.iteritems()): |
|
2122 | 2150 | repo.ui.warn(_("note: possible conflict - %s was deleted " |
|
2123 | 2151 | "and renamed to:\n") % f) |
|
2124 | 2152 | for nf in sorted(fl): |
|
2125 | 2153 | repo.ui.warn(" %s\n" % nf) |
|
2126 | 2154 | |
|
2127 | 2155 | ### apply phase |
|
2128 | 2156 | if not branchmerge: # just jump to the new rev |
|
2129 | 2157 | fp1, fp2, xp1, xp2 = fp2, nullid, xp2, '' |
|
2130 | 2158 | if not partial and not wc.isinmemory(): |
|
2131 | 2159 | repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2) |
|
2132 | 2160 | # note that we're in the middle of an update |
|
2133 | 2161 | repo.vfs.write('updatestate', p2.hex()) |
|
2134 | 2162 | |
|
2135 | 2163 | # Advertise fsmonitor when its presence could be useful. |
|
2136 | 2164 | # |
|
2137 | 2165 | # We only advertise when performing an update from an empty working |
|
2138 | 2166 | # directory. This typically only occurs during initial clone. |
|
2139 | 2167 | # |
|
2140 | 2168 | # We give users a mechanism to disable the warning in case it is |
|
2141 | 2169 | # annoying. |
|
2142 | 2170 | # |
|
2143 | 2171 | # We only allow on Linux and MacOS because that's where fsmonitor is |
|
2144 | 2172 | # considered stable. |
|
2145 | 2173 | fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused') |
|
2146 | 2174 | fsmonitorthreshold = repo.ui.configint('fsmonitor', |
|
2147 | 2175 | 'warn_update_file_count') |
|
2148 | 2176 | try: |
|
2149 | 2177 | # avoid cycle: extensions -> cmdutil -> merge |
|
2150 | 2178 | from . import extensions |
|
2151 | 2179 | extensions.find('fsmonitor') |
|
2152 | 2180 | fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off' |
|
2153 | 2181 | # We intentionally don't look at whether fsmonitor has disabled |
|
2154 | 2182 | # itself because a) fsmonitor may have already printed a warning |
|
2155 | 2183 | # b) we only care about the config state here. |
|
2156 | 2184 | except KeyError: |
|
2157 | 2185 | fsmonitorenabled = False |
|
2158 | 2186 | |
|
2159 | 2187 | if (fsmonitorwarning |
|
2160 | 2188 | and not fsmonitorenabled |
|
2161 | 2189 | and p1.node() == nullid |
|
2162 | 2190 | and len(actions[ACTION_GET]) >= fsmonitorthreshold |
|
2163 | 2191 | and pycompat.sysplatform.startswith(('linux', 'darwin'))): |
|
2164 | 2192 | repo.ui.warn( |
|
2165 | 2193 | _('(warning: large working directory being used without ' |
|
2166 | 2194 | 'fsmonitor enabled; enable fsmonitor to improve performance; ' |
|
2167 | 2195 | 'see "hg help -e fsmonitor")\n')) |
|
2168 | 2196 | |
|
2169 | stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels) | |
|
2197 | updatedirstate = not partial and not wc.isinmemory() | |
|
2198 | wantfiledata = updatedirstate and not branchmerge | |
|
2199 | stats, getfiledata = applyupdates(repo, actions, wc, p2, overwrite, | |
|
2200 | wantfiledata, labels=labels) | |
|
2170 | 2201 | |
|
2171 | if not partial and not wc.isinmemory(): | |
|
2202 | if updatedirstate: | |
|
2172 | 2203 | with repo.dirstate.parentchange(): |
|
2173 | 2204 | repo.setparents(fp1, fp2) |
|
2174 | recordupdates(repo, actions, branchmerge) | |
|
2205 | recordupdates(repo, actions, branchmerge, getfiledata) | |
|
2175 | 2206 | # update completed, clear state |
|
2176 | 2207 | util.unlink(repo.vfs.join('updatestate')) |
|
2177 | 2208 | |
|
2178 | 2209 | if not branchmerge: |
|
2179 | 2210 | repo.dirstate.setbranch(p2.branch()) |
|
2180 | 2211 | |
|
2181 | 2212 | # If we're updating to a location, clean up any stale temporary includes |
|
2182 | 2213 | # (ex: this happens during hg rebase --abort). |
|
2183 | 2214 | if not branchmerge: |
|
2184 | 2215 | sparse.prunetemporaryincludes(repo) |
|
2185 | 2216 | |
|
2186 | 2217 | if not partial: |
|
2187 | 2218 | repo.hook('update', parent1=xp1, parent2=xp2, |
|
2188 | 2219 | error=stats.unresolvedcount) |
|
2189 | 2220 | return stats |
|
2190 | 2221 | |
|
2191 | 2222 | def graft(repo, ctx, pctx, labels=None, keepparent=False, |
|
2192 | 2223 | keepconflictparent=False): |
|
2193 | 2224 | """Do a graft-like merge. |
|
2194 | 2225 | |
|
2195 | 2226 | This is a merge where the merge ancestor is chosen such that one |
|
2196 | 2227 | or more changesets are grafted onto the current changeset. In |
|
2197 | 2228 | addition to the merge, this fixes up the dirstate to include only |
|
2198 | 2229 | a single parent (if keepparent is False) and tries to duplicate any |
|
2199 | 2230 | renames/copies appropriately. |
|
2200 | 2231 | |
|
2201 | 2232 | ctx - changeset to rebase |
|
2202 | 2233 | pctx - merge base, usually ctx.p1() |
|
2203 | 2234 | labels - merge labels eg ['local', 'graft'] |
|
2204 | 2235 | keepparent - keep second parent if any |
|
2205 | 2236 | keepconflictparent - if unresolved, keep parent used for the merge |
|
2206 | 2237 | |
|
2207 | 2238 | """ |
|
2208 | 2239 | # If we're grafting a descendant onto an ancestor, be sure to pass |
|
2209 | 2240 | # mergeancestor=True to update. This does two things: 1) allows the merge if |
|
2210 | 2241 | # the destination is the same as the parent of the ctx (so we can use graft |
|
2211 | 2242 | # to copy commits), and 2) informs update that the incoming changes are |
|
2212 | 2243 | # newer than the destination so it doesn't prompt about "remote changed foo |
|
2213 | 2244 | # which local deleted". |
|
2214 | 2245 | mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node()) |
|
2215 | 2246 | |
|
2216 | 2247 | stats = update(repo, ctx.node(), True, True, pctx.node(), |
|
2217 | 2248 | mergeancestor=mergeancestor, labels=labels) |
|
2218 | 2249 | |
|
2219 | 2250 | |
|
2220 | 2251 | if keepconflictparent and stats.unresolvedcount: |
|
2221 | 2252 | pother = ctx.node() |
|
2222 | 2253 | else: |
|
2223 | 2254 | pother = nullid |
|
2224 | 2255 | parents = ctx.parents() |
|
2225 | 2256 | if keepparent and len(parents) == 2 and pctx in parents: |
|
2226 | 2257 | parents.remove(pctx) |
|
2227 | 2258 | pother = parents[0].node() |
|
2228 | 2259 | |
|
2229 | 2260 | with repo.dirstate.parentchange(): |
|
2230 | 2261 | repo.setparents(repo['.'].node(), pother) |
|
2231 | 2262 | repo.dirstate.write(repo.currenttransaction()) |
|
2232 | 2263 | # fix up dirstate for copies and renames |
|
2233 | 2264 | copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev()) |
|
2234 | 2265 | return stats |
|
2235 | 2266 | |
|
2236 | 2267 | def purge(repo, matcher, ignored=False, removeemptydirs=True, |
|
2237 | 2268 | removefiles=True, abortonerror=False, noop=False): |
|
2238 | 2269 | """Purge the working directory of untracked files. |
|
2239 | 2270 | |
|
2240 | 2271 | ``matcher`` is a matcher configured to scan the working directory - |
|
2241 | 2272 | potentially a subset. |
|
2242 | 2273 | |
|
2243 | 2274 | ``ignored`` controls whether ignored files should also be purged. |
|
2244 | 2275 | |
|
2245 | 2276 | ``removeemptydirs`` controls whether empty directories should be removed. |
|
2246 | 2277 | |
|
2247 | 2278 | ``removefiles`` controls whether files are removed. |
|
2248 | 2279 | |
|
2249 | 2280 | ``abortonerror`` causes an exception to be raised if an error occurs |
|
2250 | 2281 | deleting a file or directory. |
|
2251 | 2282 | |
|
2252 | 2283 | ``noop`` controls whether to actually remove files. If true, files are

2253 | 2284 | not removed, but the paths that would be removed are still returned.
|
2254 | 2285 | |
|
2255 | 2286 | Returns an iterable of relative paths in the working directory that were |
|
2256 | 2287 | or would be removed. |
|
2257 | 2288 | """ |
|
2258 | 2289 | |
|
2259 | 2290 | def remove(removefn, path): |
|
2260 | 2291 | try: |
|
2261 | 2292 | removefn(path) |
|
2262 | 2293 | except OSError: |
|
2263 | 2294 | m = _('%s cannot be removed') % path |
|
2264 | 2295 | if abortonerror: |
|
2265 | 2296 | raise error.Abort(m) |
|
2266 | 2297 | else: |
|
2267 | 2298 | repo.ui.warn(_('warning: %s\n') % m) |
|
2268 | 2299 | |
|
2269 | 2300 | # There's no API to copy a matcher. So mutate the passed matcher and |
|
2270 | 2301 | # restore it when we're done. |
|
2271 | 2302 | oldexplicitdir = matcher.explicitdir |
|
2272 | 2303 | oldtraversedir = matcher.traversedir |
|
2273 | 2304 | |
|
2274 | 2305 | res = [] |
|
2275 | 2306 | |
|
2276 | 2307 | try: |
|
2277 | 2308 | if removeemptydirs: |
|
2278 | 2309 | directories = [] |
|
2279 | 2310 | matcher.explicitdir = matcher.traversedir = directories.append |
|
2280 | 2311 | |
|
2281 | 2312 | status = repo.status(match=matcher, ignored=ignored, unknown=True) |
|
2282 | 2313 | |
|
2283 | 2314 | if removefiles: |
|
2284 | 2315 | for f in sorted(status.unknown + status.ignored): |
|
2285 | 2316 | if not noop: |
|
2286 | 2317 | repo.ui.note(_('removing file %s\n') % f) |
|
2287 | 2318 | remove(repo.wvfs.unlink, f) |
|
2288 | 2319 | res.append(f) |
|
2289 | 2320 | |
|
2290 | 2321 | if removeemptydirs: |
|
2291 | 2322 | for f in sorted(directories, reverse=True): |
|
2292 | 2323 | if matcher(f) and not repo.wvfs.listdir(f): |
|
2293 | 2324 | if not noop: |
|
2294 | 2325 | repo.ui.note(_('removing directory %s\n') % f) |
|
2295 | 2326 | remove(repo.wvfs.rmdir, f) |
|
2296 | 2327 | res.append(f) |
|
2297 | 2328 | |
|
2298 | 2329 | return res |
|
2299 | 2330 | |
|
2300 | 2331 | finally: |
|
2301 | 2332 | matcher.explicitdir = oldexplicitdir |
|
2302 | 2333 | matcher.traversedir = oldtraversedir |
@@ -1,317 +1,317
|
1 | 1 | # narrowspec.py - methods for working with a narrow view of a repository |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2017 Google, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | from .i18n import _ |
|
11 | 11 | from . import ( |
|
12 | 12 | error, |
|
13 | 13 | match as matchmod, |
|
14 | 14 | merge, |
|
15 | 15 | repository, |
|
16 | 16 | scmutil, |
|
17 | 17 | sparse, |
|
18 | 18 | util, |
|
19 | 19 | ) |
|
20 | 20 | |
|
21 | 21 | # The file in .hg/store/ that indicates which paths exist in the store
|
22 | 22 | FILENAME = 'narrowspec' |
|
23 | 23 | # The file in .hg/ that indicates which paths exist in the dirstate
|
24 | 24 | DIRSTATE_FILENAME = 'narrowspec.dirstate' |
|
25 | 25 | |
|
26 | 26 | # Pattern prefixes that are allowed in narrow patterns. This list MUST |
|
27 | 27 | # only contain patterns that are fast and safe to evaluate. Keep in mind |
|
28 | 28 | # that patterns are supplied by clients and executed on remote servers |
|
29 | 29 | # as part of wire protocol commands. That means that changes to this |
|
30 | 30 | # data structure influence the wire protocol and should not be taken |
|
31 | 31 | # lightly - especially removals. |
|
32 | 32 | VALID_PREFIXES = ( |
|
33 | 33 | b'path:', |
|
34 | 34 | b'rootfilesin:', |
|
35 | 35 | ) |
|
36 | 36 | |
|
37 | 37 | def normalizesplitpattern(kind, pat): |
|
38 | 38 | """Returns the normalized version of a pattern and kind. |
|
39 | 39 | |
|
40 | 40 | Returns a tuple with the normalized kind and normalized pattern. |
|
41 | 41 | """ |
|
42 | 42 | pat = pat.rstrip('/') |
|
43 | 43 | _validatepattern(pat) |
|
44 | 44 | return kind, pat |
|
45 | 45 | |
|
46 | 46 | def _numlines(s): |
|
47 | 47 | """Returns the number of lines in s, including ending empty lines.""" |
|
48 | 48 | # We use splitlines because it is Unicode-friendly and thus Python 3 |
|
49 | 49 | # compatible. However, it does not count empty lines at the end, so trick |
|
50 | 50 | # it by adding a character at the end. |
|
51 | 51 | return len((s + 'x').splitlines()) |
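To make the sentinel trick concrete, a self-contained copy of the function with a few checks; the appended 'x' is what keeps splitlines() from dropping a final empty line.

```python
def _numlines(s):
    return len((s + 'x').splitlines())

assert _numlines('') == 1             # an empty spec still counts as one line
assert _numlines('path:foo') == 1
assert _numlines('path:foo\n') == 2   # trailing newline counts, so it is rejected
assert _numlines('a\nb') == 2
```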
|
52 | 52 | |
|
53 | 53 | def _validatepattern(pat): |
|
54 | 54 | """Validates the pattern and aborts if it is invalid. |
|
55 | 55 | |
|
56 | 56 | Patterns are stored in the narrowspec as newline-separated |
|
57 | 57 | POSIX-style bytestring paths. There's no escaping. |
|
58 | 58 | """ |
|
59 | 59 | |
|
60 | 60 | # We use newlines as separators in the narrowspec file, so don't allow them |
|
61 | 61 | # in patterns. |
|
62 | 62 | if _numlines(pat) > 1: |
|
63 | 63 | raise error.Abort(_('newlines are not allowed in narrowspec paths')) |
|
64 | 64 | |
|
65 | 65 | components = pat.split('/') |
|
66 | 66 | if '.' in components or '..' in components: |
|
67 | 67 | raise error.Abort(_('"." and ".." are not allowed in narrowspec paths')) |
|
68 | 68 | |
|
69 | 69 | def normalizepattern(pattern, defaultkind='path'): |
|
70 | 70 | """Returns the normalized version of a text-format pattern. |
|
71 | 71 | |
|
72 | 72 | If the pattern has no kind, the default will be added. |
|
73 | 73 | """ |
|
74 | 74 | kind, pat = matchmod._patsplit(pattern, defaultkind) |
|
75 | 75 | return '%s:%s' % normalizesplitpattern(kind, pat) |
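A few illustrative checks (not from the test suite), assuming the helpers above are in scope:

```python
# assumes normalizepattern/parsepatterns from this module are importable
assert normalizepattern('foo/bar/') == 'path:foo/bar'  # kind added, slash dropped
assert normalizepattern('rootfilesin:x/') == 'rootfilesin:x'
# 'glob:*' normalizes to 'glob:*' here, but parsepatterns() rejects it in
# validatepatterns(), since only path:/rootfilesin: prefixes are allowed
```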
|
76 | 76 | |
|
77 | 77 | def parsepatterns(pats): |
|
78 | 78 | """Parses an iterable of patterns into a typed pattern set. |
|
79 | 79 | |
|
80 | 80 | Patterns are assumed to be ``path:`` if no prefix is present. |
|
81 | 81 | For safety and performance reasons, only some prefixes are allowed. |
|
82 | 82 | See ``validatepatterns()``. |
|
83 | 83 | |
|
84 | 84 | This function should be used on patterns that come from the user to |
|
85 | 85 | normalize and validate them to the internal data structure used for |
|
86 | 86 | representing patterns. |
|
87 | 87 | """ |
|
88 | 88 | res = {normalizepattern(orig) for orig in pats} |
|
89 | 89 | validatepatterns(res) |
|
90 | 90 | return res |
|
91 | 91 | |
|
92 | 92 | def validatepatterns(pats): |
|
93 | 93 | """Validate that patterns are in the expected data structure and format. |
|
94 | 94 | |
|
95 | 95 | And that is a set of normalized patterns beginning with ``path:`` or |
|
96 | 96 | ``rootfilesin:``. |
|
97 | 97 | |
|
98 | 98 | This function should be used to validate internal data structures |
|
99 | 99 | and patterns that are loaded from sources that use the internal, |
|
100 | 100 | prefixed pattern representation (but can't necessarily be fully trusted). |
|
101 | 101 | """ |
|
102 | 102 | if not isinstance(pats, set): |
|
103 | 103 | raise error.ProgrammingError('narrow patterns should be a set; ' |
|
104 | 104 | 'got %r' % pats) |
|
105 | 105 | |
|
106 | 106 | for pat in pats: |
|
107 | 107 | if not pat.startswith(VALID_PREFIXES): |
|
108 | 108 | # Use a Mercurial exception because this can happen due to user |
|
109 | 109 | # bugs (e.g. manually updating spec file). |
|
110 | 110 | raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat, |
|
111 | 111 | hint=_('narrow patterns must begin with one of ' |
|
112 | 112 | 'the following: %s') % |
|
113 | 113 | ', '.join(VALID_PREFIXES)) |
|
114 | 114 | |
|
115 | 115 | def format(includes, excludes): |
|
116 | 116 | output = '[include]\n' |
|
117 | 117 | for i in sorted(includes - excludes): |
|
118 | 118 | output += i + '\n' |
|
119 | 119 | output += '[exclude]\n' |
|
120 | 120 | for e in sorted(excludes): |
|
121 | 121 | output += e + '\n' |
|
122 | 122 | return output |
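A self-contained run of format() (its body copied verbatim) with made-up patterns, showing that excluded patterns are filtered out of the [include] section:

```python
def format(includes, excludes):
    output = '[include]\n'
    for i in sorted(includes - excludes):
        output += i + '\n'
    output += '[exclude]\n'
    for e in sorted(excludes):
        output += e + '\n'
    return output

print(format({'path:src', 'path:docs'}, {'path:docs'}))
# [include]
# path:src
# [exclude]
# path:docs
```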
|
123 | 123 | |
|
124 | 124 | def match(root, include=None, exclude=None): |
|
125 | 125 | if not include: |
|
126 | 126 | # Passing empty include and empty exclude to matchmod.match() |
|
127 | 127 | # gives a matcher that matches everything, so explicitly use |
|
128 | 128 | # the nevermatcher. |
|
129 | 129 | return matchmod.never() |
|
130 | 130 | return matchmod.match(root, '', [], include=include or [], |
|
131 | 131 | exclude=exclude or []) |
|
132 | 132 | |
|
133 | 133 | def parseconfig(ui, spec): |
|
134 | 134 | # maybe we should care about the profiles returned too |
|
135 | 135 | includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow') |
|
136 | 136 | if profiles: |
|
137 | 137 | raise error.Abort(_("including other spec files using '%include' is not" |
|
138 | 138 | " supported in narrowspec")) |
|
139 | 139 | |
|
140 | 140 | validatepatterns(includepats) |
|
141 | 141 | validatepatterns(excludepats) |
|
142 | 142 | |
|
143 | 143 | return includepats, excludepats |
|
144 | 144 | |
|
145 | 145 | def load(repo): |
|
146 | 146 | # Treat "narrowspec does not exist" the same as "narrowspec file exists |
|
147 | 147 | # and is empty". |
|
148 | 148 | spec = repo.svfs.tryread(FILENAME) |
|
149 | 149 | return parseconfig(repo.ui, spec) |
|
150 | 150 | |
|
151 | 151 | def save(repo, includepats, excludepats): |
|
152 | 152 | validatepatterns(includepats) |
|
153 | 153 | validatepatterns(excludepats) |
|
154 | 154 | spec = format(includepats, excludepats) |
|
155 | 155 | repo.svfs.write(FILENAME, spec) |
|
156 | 156 | |
|
157 | 157 | def copytoworkingcopy(repo): |
|
158 | 158 | spec = repo.svfs.read(FILENAME) |
|
159 | 159 | repo.vfs.write(DIRSTATE_FILENAME, spec) |
|
160 | 160 | |
|
161 | 161 | def savebackup(repo, backupname): |
|
162 | 162 | if repository.NARROW_REQUIREMENT not in repo.requirements: |
|
163 | 163 | return |
|
164 | 164 | svfs = repo.svfs |
|
165 | 165 | svfs.tryunlink(backupname) |
|
166 | 166 | util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True) |
|
167 | 167 | |
|
168 | 168 | def restorebackup(repo, backupname): |
|
169 | 169 | if repository.NARROW_REQUIREMENT not in repo.requirements: |
|
170 | 170 | return |
|
171 | 171 | util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME)) |
|
172 | 172 | |
|
173 | 173 | def savewcbackup(repo, backupname): |
|
174 | 174 | if repository.NARROW_REQUIREMENT not in repo.requirements: |
|
175 | 175 | return |
|
176 | 176 | vfs = repo.vfs |
|
177 | 177 | vfs.tryunlink(backupname) |
|
178 | 178 | # It may not exist in old repos |
|
179 | 179 | if vfs.exists(DIRSTATE_FILENAME): |
|
180 | 180 | util.copyfile(vfs.join(DIRSTATE_FILENAME), vfs.join(backupname), |
|
181 | 181 | hardlink=True) |
|
182 | 182 | |
|
183 | 183 | def restorewcbackup(repo, backupname): |
|
184 | 184 | if repository.NARROW_REQUIREMENT not in repo.requirements: |
|
185 | 185 | return |
|
186 | 186 | # It may not exist in old repos |
|
187 | 187 | if repo.vfs.exists(backupname): |
|
188 | 188 | util.rename(repo.vfs.join(backupname), repo.vfs.join(DIRSTATE_FILENAME)) |
|
189 | 189 | |
|
190 | 190 | def clearwcbackup(repo, backupname): |
|
191 | 191 | if repository.NARROW_REQUIREMENT not in repo.requirements: |
|
192 | 192 | return |
|
193 | 193 | repo.vfs.tryunlink(backupname) |
|
194 | 194 | |
|
195 | 195 | def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes): |
|
196 | 196 | r""" Restricts the patterns according to repo settings,

197 | 197 | resulting in a logical AND operation
|
198 | 198 | |
|
199 | 199 | :param req_includes: requested includes |
|
200 | 200 | :param req_excludes: requested excludes |
|
201 | 201 | :param repo_includes: repo includes |
|
202 | 202 | :param repo_excludes: repo excludes |
|
203 | 203 | :return: include patterns, exclude patterns, and invalid include patterns. |
|
204 | 204 | |
|
205 | 205 | >>> restrictpatterns({'f1','f2'}, {}, ['f1'], []) |
|
206 | 206 | (set(['f1']), {}, []) |
|
207 | 207 | >>> restrictpatterns({'f1'}, {}, ['f1','f2'], []) |
|
208 | 208 | (set(['f1']), {}, []) |
|
209 | 209 | >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], []) |
|
210 | 210 | (set(['f1/fc1']), {}, []) |
|
211 | 211 | >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], []) |
|
212 | 212 | ([], set(['path:.']), []) |
|
213 | 213 | >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], []) |
|
214 | 214 | (set(['f2/fc2']), {}, []) |
|
215 | 215 | >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], []) |
|
216 | 216 | ([], set(['path:.']), []) |
|
217 | 217 | >>> restrictpatterns({'f1/$non_existent_var'}, {}, ['f1','f2'], [])

218 | 218 | (set(['f1/$non_existent_var']), {}, [])
|
219 | 219 | """ |
|
220 | 220 | res_excludes = set(req_excludes) |
|
221 | 221 | res_excludes.update(repo_excludes) |
|
222 | 222 | invalid_includes = [] |
|
223 | 223 | if not req_includes: |
|
224 | 224 | res_includes = set(repo_includes) |
|
225 | 225 | elif 'path:.' not in repo_includes: |
|
226 | 226 | res_includes = [] |
|
227 | 227 | for req_include in req_includes: |
|
228 | 228 | req_include = util.expandpath(util.normpath(req_include)) |
|
229 | 229 | if req_include in repo_includes: |
|
230 | 230 | res_includes.append(req_include) |
|
231 | 231 | continue |
|
232 | 232 | valid = False |
|
233 | 233 | for repo_include in repo_includes: |
|
234 | 234 | if req_include.startswith(repo_include + '/'): |
|
235 | 235 | valid = True |
|
236 | 236 | res_includes.append(req_include) |
|
237 | 237 | break |
|
238 | 238 | if not valid: |
|
239 | 239 | invalid_includes.append(req_include) |
|
240 | 240 | if len(res_includes) == 0: |
|
241 | 241 | res_excludes = {'path:.'} |
|
242 | 242 | else: |
|
243 | 243 | res_includes = set(res_includes) |
|
244 | 244 | else: |
|
245 | 245 | res_includes = set(req_includes) |
|
246 | 246 | return res_includes, res_excludes, invalid_includes |
|
247 | 247 | |
|
248 | 248 | # These two are extracted for extensions (specifically for Google's CitC file |
|
249 | 249 | # system) |
|
250 | 250 | def _deletecleanfiles(repo, files): |
|
251 | 251 | for f in files: |
|
252 | 252 | repo.wvfs.unlinkpath(f) |
|
253 | 253 | |
|
254 | 254 | def _writeaddedfiles(repo, pctx, files): |
|
255 | 255 | actions = merge.emptyactions() |
|
256 | 256 | addgaction = actions[merge.ACTION_GET].append |
|
257 | 257 | mf = repo['.'].manifest() |
|
258 | 258 | for f in files: |
|
259 | 259 | if not repo.wvfs.exists(f): |
|
260 | 260 | addgaction((f, (mf.flags(f), False), "narrowspec updated")) |
|
261 | 261 | merge.applyupdates(repo, actions, wctx=repo[None], |
|
262 | mctx=repo['.'], overwrite=False) | |
|
262 | mctx=repo['.'], overwrite=False, wantfiledata=False) | |
|
263 | 263 | |
|
264 | 264 | def checkworkingcopynarrowspec(repo): |
|
265 | 265 | # Avoid infinite recursion when updating the working copy |
|
266 | 266 | if getattr(repo, '_updatingnarrowspec', False): |
|
267 | 267 | return |
|
268 | 268 | storespec = repo.svfs.tryread(FILENAME) |
|
269 | 269 | wcspec = repo.vfs.tryread(DIRSTATE_FILENAME) |
|
270 | 270 | if wcspec != storespec: |
|
271 | 271 | raise error.Abort(_("working copy's narrowspec is stale"), |
|
272 | 272 | hint=_("run 'hg tracked --update-working-copy'")) |
|
273 | 273 | |
|
274 | 274 | def updateworkingcopy(repo, assumeclean=False): |
|
275 | 275 | """updates the working copy and dirstate from the store narrowspec |
|
276 | 276 | |
|
277 | 277 | When assumeclean=True, files that are not known to be clean will also |
|
278 | 278 | be deleted. It is then up to the caller to make sure they are clean. |
|
279 | 279 | """ |
|
280 | 280 | oldspec = repo.vfs.tryread(DIRSTATE_FILENAME) |
|
281 | 281 | newspec = repo.svfs.tryread(FILENAME) |
|
282 | 282 | repo._updatingnarrowspec = True |
|
283 | 283 | |
|
284 | 284 | oldincludes, oldexcludes = parseconfig(repo.ui, oldspec) |
|
285 | 285 | newincludes, newexcludes = parseconfig(repo.ui, newspec) |
|
286 | 286 | oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes) |
|
287 | 287 | newmatch = match(repo.root, include=newincludes, exclude=newexcludes) |
|
288 | 288 | addedmatch = matchmod.differencematcher(newmatch, oldmatch) |
|
289 | 289 | removedmatch = matchmod.differencematcher(oldmatch, newmatch) |
|
290 | 290 | |
|
291 | 291 | ds = repo.dirstate |
|
292 | 292 | lookup, status = ds.status(removedmatch, subrepos=[], ignored=True, |
|
293 | 293 | clean=True, unknown=True) |
|
294 | 294 | trackeddirty = status.modified + status.added |
|
295 | 295 | clean = status.clean |
|
296 | 296 | if assumeclean: |
|
297 | 297 | assert not trackeddirty |
|
298 | 298 | clean.extend(lookup) |
|
299 | 299 | else: |
|
300 | 300 | trackeddirty.extend(lookup) |
|
301 | 301 | _deletecleanfiles(repo, clean) |
|
302 | 302 | uipathfn = scmutil.getuipathfn(repo) |
|
303 | 303 | for f in sorted(trackeddirty): |
|
304 | 304 | repo.ui.status(_('not deleting possibly dirty file %s\n') % uipathfn(f)) |
|
305 | 305 | for f in sorted(status.unknown): |
|
306 | 306 | repo.ui.status(_('not deleting unknown file %s\n') % uipathfn(f)) |
|
307 | 307 | for f in sorted(status.ignored): |
|
308 | 308 | repo.ui.status(_('not deleting ignored file %s\n') % uipathfn(f)) |
|
309 | 309 | for f in clean + trackeddirty: |
|
310 | 310 | ds.drop(f) |
|
311 | 311 | |
|
312 | 312 | pctx = repo['.'] |
|
313 | 313 | newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds] |
|
314 | 314 | for f in newfiles: |
|
315 | 315 | ds.normallookup(f) |
|
316 | 316 | _writeaddedfiles(repo, pctx, newfiles) |
|
317 | 317 | repo._updatingnarrowspec = False |
@@ -1,700 +1,702
|
1 | 1 | # sparse.py - functionality for sparse checkouts |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2014 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import hashlib |
|
11 | 11 | import os |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from .node import ( |
|
15 | 15 | hex, |
|
16 | 16 | nullid, |
|
17 | 17 | ) |
|
18 | 18 | from . import ( |
|
19 | 19 | error, |
|
20 | 20 | match as matchmod, |
|
21 | 21 | merge as mergemod, |
|
22 | 22 | pathutil, |
|
23 | 23 | pycompat, |
|
24 | 24 | scmutil, |
|
25 | 25 | util, |
|
26 | 26 | ) |
|
27 | 27 | |
|
28 | 28 | # Whether sparse features are enabled. This variable is intended to be |
|
29 | 29 | # temporary to facilitate porting sparse to core. It should eventually be |
|
30 | 30 | # a per-repo option, possibly a repo requirement. |
|
31 | 31 | enabled = False |
|
32 | 32 | |
|
33 | 33 | def parseconfig(ui, raw, action): |
|
34 | 34 | """Parse sparse config file content. |
|
35 | 35 | |
|
36 | 36 | action is the command which is trigerring this read, can be narrow, sparse |
|
37 | 37 | |
|
38 | 38 | Returns a tuple of includes, excludes, and profiles. |
|
39 | 39 | """ |
|
40 | 40 | includes = set() |
|
41 | 41 | excludes = set() |
|
42 | 42 | profiles = set() |
|
43 | 43 | current = None |
|
44 | 44 | havesection = False |
|
45 | 45 | |
|
46 | 46 | for line in raw.split('\n'): |
|
47 | 47 | line = line.strip() |
|
48 | 48 | if not line or line.startswith('#'): |
|
49 | 49 | # empty or comment line, skip |
|
50 | 50 | continue |
|
51 | 51 | elif line.startswith('%include '): |
|
52 | 52 | line = line[9:].strip() |
|
53 | 53 | if line: |
|
54 | 54 | profiles.add(line) |
|
55 | 55 | elif line == '[include]': |
|
56 | 56 | if havesection and current != includes: |
|
57 | 57 | # TODO pass filename into this API so we can report it. |
|
58 | 58 | raise error.Abort(_('%(action)s config cannot have includes ' |
|
59 | 59 | 'after excludes') % {'action': action}) |
|
60 | 60 | havesection = True |
|
61 | 61 | current = includes |
|
62 | 62 | continue |
|
63 | 63 | elif line == '[exclude]': |
|
64 | 64 | havesection = True |
|
65 | 65 | current = excludes |
|
66 | 66 | elif line: |
|
67 | 67 | if current is None: |
|
68 | 68 | raise error.Abort(_('%(action)s config entry outside of ' |
|
69 | 69 | 'section: %(line)s') |
|
70 | 70 | % {'action': action, 'line': line}, |
|
71 | 71 | hint=_('add an [include] or [exclude] line ' |
|
72 | 72 | 'to declare the entry type')) |
|
73 | 73 | |
|
74 | 74 | if line.strip().startswith('/'): |
|
75 | 75 | ui.warn(_('warning: %(action)s profile cannot use' |
|
76 | 76 | ' paths starting with /, ignoring %(line)s\n') |
|
77 | 77 | % {'action': action, 'line': line}) |
|
78 | 78 | continue |
|
79 | 79 | current.add(line) |
|
80 | 80 | |
|
81 | 81 | return includes, excludes, profiles |
|
82 | 82 | |
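For reference, a hypothetical config in the format parseconfig() accepts (the file names are made up):

    # comments and blank lines are skipped
    %include teams/frontend.sparse
    [include]
    src/web
    [exclude]
    src/web/generated

Parsed with action='sparse', the text above would yield includes={'src/web'}, excludes={'src/web/generated'}, profiles={'teams/frontend.sparse'}.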
|
83 | 83 | # Exists as separate function to facilitate monkeypatching. |
|
84 | 84 | def readprofile(repo, profile, changeid): |
|
85 | 85 | """Resolve the raw content of a sparse profile file.""" |
|
86 | 86 | # TODO add some kind of cache here because this incurs a manifest |
|
87 | 87 | # resolve and can be slow. |
|
88 | 88 | return repo.filectx(profile, changeid=changeid).data() |
|
89 | 89 | |
|
90 | 90 | def patternsforrev(repo, rev): |
|
91 | 91 | """Obtain sparse checkout patterns for the given rev. |
|
92 | 92 | |
|
93 | 93 | Returns a tuple of iterables representing includes, excludes, and |
|
94 | 94 | profiles.
|
95 | 95 | """ |
|
96 | 96 | # Feature isn't enabled. No-op. |
|
97 | 97 | if not enabled: |
|
98 | 98 | return set(), set(), set() |
|
99 | 99 | |
|
100 | 100 | raw = repo.vfs.tryread('sparse') |
|
101 | 101 | if not raw: |
|
102 | 102 | return set(), set(), set() |
|
103 | 103 | |
|
104 | 104 | if rev is None: |
|
105 | 105 | raise error.Abort(_('cannot parse sparse patterns from working ' |
|
106 | 106 | 'directory')) |
|
107 | 107 | |
|
108 | 108 | includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse') |
|
109 | 109 | ctx = repo[rev] |
|
110 | 110 | |
|
111 | 111 | if profiles: |
|
112 | 112 | visited = set() |
|
113 | 113 | while profiles: |
|
114 | 114 | profile = profiles.pop() |
|
115 | 115 | if profile in visited: |
|
116 | 116 | continue |
|
117 | 117 | |
|
118 | 118 | visited.add(profile) |
|
119 | 119 | |
|
120 | 120 | try: |
|
121 | 121 | raw = readprofile(repo, profile, rev) |
|
122 | 122 | except error.ManifestLookupError: |
|
123 | 123 | msg = ( |
|
124 | 124 | "warning: sparse profile '%s' not found " |
|
125 | 125 | "in rev %s - ignoring it\n" % (profile, ctx)) |
|
126 | 126 | # experimental config: sparse.missingwarning |
|
127 | 127 | if repo.ui.configbool( |
|
128 | 128 | 'sparse', 'missingwarning'): |
|
129 | 129 | repo.ui.warn(msg) |
|
130 | 130 | else: |
|
131 | 131 | repo.ui.debug(msg) |
|
132 | 132 | continue |
|
133 | 133 | |
|
134 | 134 | pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw, 'sparse') |
|
135 | 135 | includes.update(pincludes) |
|
136 | 136 | excludes.update(pexcludes) |
|
137 | 137 | profiles.update(subprofs) |
|
138 | 138 | |
|
139 | 139 | profiles = visited |
|
140 | 140 | |
|
141 | 141 | if includes: |
|
142 | 142 | includes.add('.hg*') |
|
143 | 143 | |
|
144 | 144 | return includes, excludes, profiles |
|
145 | 145 | |
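The profile loop above is a worklist traversal with a visited set, which also makes %include cycles harmless. A standalone sketch of just that traversal, where 'read' stands in for readprofile plus parseconfig:

    def resolveprofiles(start, read):
        profiles, visited, includes = set(start), set(), set()
        while profiles:
            p = profiles.pop()
            if p in visited:
                continue
            visited.add(p)
            pincludes, subprofs = read(p)  # stand-in for readprofile+parseconfig
            includes |= pincludes
            profiles |= subprofs
        return includes, visited

    store = {'a.sparse': ({'src'}, {'b.sparse'}),
             'b.sparse': ({'docs'}, {'a.sparse'})}  # deliberate cycle
    print(resolveprofiles({'a.sparse'}, store.__getitem__))
    # ({'src', 'docs'}, {'a.sparse', 'b.sparse'})  (set order may vary)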
|
146 | 146 | def activeconfig(repo): |
|
147 | 147 | """Determine the active sparse config rules. |
|
148 | 148 | |
|
149 | 149 | Rules are constructed by reading the current sparse config and bringing in |
|
150 | 150 | referenced profiles from parents of the working directory. |
|
151 | 151 | """ |
|
152 | 152 | revs = [repo.changelog.rev(node) for node in |
|
153 | 153 | repo.dirstate.parents() if node != nullid] |
|
154 | 154 | |
|
155 | 155 | allincludes = set() |
|
156 | 156 | allexcludes = set() |
|
157 | 157 | allprofiles = set() |
|
158 | 158 | |
|
159 | 159 | for rev in revs: |
|
160 | 160 | includes, excludes, profiles = patternsforrev(repo, rev) |
|
161 | 161 | allincludes |= includes |
|
162 | 162 | allexcludes |= excludes |
|
163 | 163 | allprofiles |= profiles |
|
164 | 164 | |
|
165 | 165 | return allincludes, allexcludes, allprofiles |
|
166 | 166 | |
|
167 | 167 | def configsignature(repo, includetemp=True): |
|
168 | 168 | """Obtain the signature string for the current sparse configuration. |
|
169 | 169 | |
|
170 | 170 | This is used to construct a cache key for matchers. |
|
171 | 171 | """ |
|
172 | 172 | cache = repo._sparsesignaturecache |
|
173 | 173 | |
|
174 | 174 | signature = cache.get('signature') |
|
175 | 175 | |
|
176 | 176 | if includetemp: |
|
177 | 177 | tempsignature = cache.get('tempsignature') |
|
178 | 178 | else: |
|
179 | 179 | tempsignature = '0' |
|
180 | 180 | |
|
181 | 181 | if signature is None or (includetemp and tempsignature is None): |
|
182 | 182 | signature = hex(hashlib.sha1(repo.vfs.tryread('sparse')).digest()) |
|
183 | 183 | cache['signature'] = signature |
|
184 | 184 | |
|
185 | 185 | if includetemp: |
|
186 | 186 | raw = repo.vfs.tryread('tempsparse') |
|
187 | 187 | tempsignature = hex(hashlib.sha1(raw).digest()) |
|
188 | 188 | cache['tempsignature'] = tempsignature |
|
189 | 189 | |
|
190 | 190 | return '%s %s' % (signature, tempsignature) |
|
191 | 191 | |
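configsignature() boils down to two SHA-1s over the raw config files. A standalone equivalent of the key construction (mercurial's hex() over a digest is hexdigest() here):

    import hashlib

    def signature(data):
        # data is the raw bytes of .hg/sparse or .hg/tempsparse; tryread()
        # yields b'' for a missing file, which still hashes fine.
        return hashlib.sha1(data).hexdigest()

    key = '%s %s' % (signature(b'[include]\nsrc/web\n'), signature(b''))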
|
192 | 192 | def writeconfig(repo, includes, excludes, profiles): |
|
193 | 193 | """Write the sparse config file given a sparse configuration.""" |
|
194 | 194 | with repo.vfs('sparse', 'wb') as fh: |
|
195 | 195 | for p in sorted(profiles): |
|
196 | 196 | fh.write('%%include %s\n' % p) |
|
197 | 197 | |
|
198 | 198 | if includes: |
|
199 | 199 | fh.write('[include]\n') |
|
200 | 200 | for i in sorted(includes): |
|
201 | 201 | fh.write(i) |
|
202 | 202 | fh.write('\n') |
|
203 | 203 | |
|
204 | 204 | if excludes: |
|
205 | 205 | fh.write('[exclude]\n') |
|
206 | 206 | for e in sorted(excludes): |
|
207 | 207 | fh.write(e) |
|
208 | 208 | fh.write('\n') |
|
209 | 209 | |
|
210 | 210 | repo._sparsesignaturecache.clear() |
|
211 | 211 | |
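A minimal re-implementation sketch of the serialization order used by writeconfig() (profiles first, then [include], then [exclude]), producing text that parseconfig() can read back:

    import io

    def serializeconfig(includes, excludes, profiles):
        fh = io.StringIO()
        for p in sorted(profiles):
            fh.write('%%include %s\n' % p)
        if includes:
            fh.write('[include]\n')
            for i in sorted(includes):
                fh.write(i + '\n')
        if excludes:
            fh.write('[exclude]\n')
            for e in sorted(excludes):
                fh.write(e + '\n')
        return fh.getvalue()

    print(serializeconfig({'src/web'}, {'src/web/generated'}, {'base.sparse'}))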
|
212 | 212 | def readtemporaryincludes(repo): |
|
213 | 213 | raw = repo.vfs.tryread('tempsparse') |
|
214 | 214 | if not raw: |
|
215 | 215 | return set() |
|
216 | 216 | |
|
217 | 217 | return set(raw.split('\n')) |
|
218 | 218 | |
|
219 | 219 | def writetemporaryincludes(repo, includes): |
|
220 | 220 | repo.vfs.write('tempsparse', '\n'.join(sorted(includes))) |
|
221 | 221 | repo._sparsesignaturecache.clear() |
|
222 | 222 | |
|
223 | 223 | def addtemporaryincludes(repo, additional): |
|
224 | 224 | includes = readtemporaryincludes(repo) |
|
225 | 225 | for i in additional: |
|
226 | 226 | includes.add(i) |
|
227 | 227 | writetemporaryincludes(repo, includes) |
|
228 | 228 | |
|
229 | 229 | def prunetemporaryincludes(repo): |
|
230 | 230 | if not enabled or not repo.vfs.exists('tempsparse'): |
|
231 | 231 | return |
|
232 | 232 | |
|
233 | 233 | s = repo.status() |
|
234 | 234 | if s.modified or s.added or s.removed or s.deleted: |
|
235 | 235 | # Still have pending changes. Don't bother trying to prune. |
|
236 | 236 | return |
|
237 | 237 | |
|
238 | 238 | sparsematch = matcher(repo, includetemp=False) |
|
239 | 239 | dirstate = repo.dirstate |
|
240 | 240 | actions = [] |
|
241 | 241 | dropped = [] |
|
242 | 242 | tempincludes = readtemporaryincludes(repo) |
|
243 | 243 | for file in tempincludes: |
|
244 | 244 | if file in dirstate and not sparsematch(file): |
|
245 | 245 | message = _('dropping temporarily included sparse files') |
|
246 | 246 | actions.append((file, None, message)) |
|
247 | 247 | dropped.append(file) |
|
248 | 248 | |
|
249 | 249 | typeactions = mergemod.emptyactions() |
|
250 | 250 | typeactions['r'] = actions |
|
251 | mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False) |

251 | mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False, |

252 | wantfiledata=False) |
|
252 | 253 | |
|
253 | 254 | # Fix dirstate |
|
254 | 255 | for file in dropped: |
|
255 | 256 | dirstate.drop(file) |
|
256 | 257 | |
|
257 | 258 | repo.vfs.unlink('tempsparse') |
|
258 | 259 | repo._sparsesignaturecache.clear() |
|
259 | 260 | msg = _('cleaned up %d temporarily added file(s) from the ' |
|
260 | 261 | 'sparse checkout\n') |
|
261 | 262 | repo.ui.status(msg % len(tempincludes)) |
|
262 | 263 | |
|
263 | 264 | def forceincludematcher(matcher, includes): |
|
264 | 265 | """Returns a matcher that returns true for any of the forced includes |
|
265 | 266 | before testing against the actual matcher.""" |
|
266 | 267 | kindpats = [('path', include, '') for include in includes] |
|
267 | 268 | includematcher = matchmod.includematcher('', kindpats) |
|
268 | 269 | return matchmod.unionmatcher([includematcher, matcher]) |
|
269 | 270 | |
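A set-level sketch of forceincludematcher(): a file passes if it is one of the forced includes or the wrapped matcher accepts it. (The real version builds 'path' patterns, which also match everything under a directory; this toy only matches exact names.)

    def forceinclude(matchfn, includes):
        forced = set(includes)
        return lambda f: f in forced or matchfn(f)

    base = lambda f: f.startswith('src/')
    m = forceinclude(base, {'docs/README'})
    assert m('src/a.py') and m('docs/README') and not m('docs/other')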
|
270 | 271 | def matcher(repo, revs=None, includetemp=True): |
|
271 | 272 | """Obtain a matcher for sparse working directories for the given revs. |
|
272 | 273 | |
|
273 | 274 | If multiple revisions are specified, the matcher is the union of all |
|
274 | 275 | revs. |
|
275 | 276 | |
|
276 | 277 | ``includetemp`` indicates whether to use the temporary sparse profile. |
|
277 | 278 | """ |
|
278 | 279 | # If sparse isn't enabled, sparse matcher matches everything. |
|
279 | 280 | if not enabled: |
|
280 | 281 | return matchmod.always() |
|
281 | 282 | |
|
282 | 283 | if not revs or revs == [None]: |
|
283 | 284 | revs = [repo.changelog.rev(node) |
|
284 | 285 | for node in repo.dirstate.parents() if node != nullid] |
|
285 | 286 | |
|
286 | 287 | signature = configsignature(repo, includetemp=includetemp) |
|
287 | 288 | |
|
288 | 289 | key = '%s %s' % (signature, ' '.join(map(pycompat.bytestr, revs))) |
|
289 | 290 | |
|
290 | 291 | result = repo._sparsematchercache.get(key) |
|
291 | 292 | if result: |
|
292 | 293 | return result |
|
293 | 294 | |
|
294 | 295 | matchers = [] |
|
295 | 296 | for rev in revs: |
|
296 | 297 | try: |
|
297 | 298 | includes, excludes, profiles = patternsforrev(repo, rev) |
|
298 | 299 | |
|
299 | 300 | if includes or excludes: |
|
300 | 301 | matcher = matchmod.match(repo.root, '', [], |
|
301 | 302 | include=includes, exclude=excludes, |
|
302 | 303 | default='relpath') |
|
303 | 304 | matchers.append(matcher) |
|
304 | 305 | except IOError: |
|
305 | 306 | pass |
|
306 | 307 | |
|
307 | 308 | if not matchers: |
|
308 | 309 | result = matchmod.always() |
|
309 | 310 | elif len(matchers) == 1: |
|
310 | 311 | result = matchers[0] |
|
311 | 312 | else: |
|
312 | 313 | result = matchmod.unionmatcher(matchers) |
|
313 | 314 | |
|
314 | 315 | if includetemp: |
|
315 | 316 | tempincludes = readtemporaryincludes(repo) |
|
316 | 317 | result = forceincludematcher(result, tempincludes) |
|
317 | 318 | |
|
318 | 319 | repo._sparsematchercache[key] = result |
|
319 | 320 | |
|
320 | 321 | return result |
|
321 | 322 | |
|
322 | 323 | def filterupdatesactions(repo, wctx, mctx, branchmerge, actions): |
|
323 | 324 | """Filter updates to only lay out files that match the sparse rules.""" |
|
324 | 325 | if not enabled: |
|
325 | 326 | return actions |
|
326 | 327 | |
|
327 | 328 | oldrevs = [pctx.rev() for pctx in wctx.parents()] |
|
328 | 329 | oldsparsematch = matcher(repo, oldrevs) |
|
329 | 330 | |
|
330 | 331 | if oldsparsematch.always(): |
|
331 | 332 | return actions |
|
332 | 333 | |
|
333 | 334 | files = set() |
|
334 | 335 | prunedactions = {} |
|
335 | 336 | |
|
336 | 337 | if branchmerge: |
|
337 | 338 | # If we're merging, use the wctx filter, since we're merging into |
|
338 | 339 | # the wctx. |
|
339 | 340 | sparsematch = matcher(repo, [wctx.p1().rev()]) |
|
340 | 341 | else: |
|
341 | 342 | # If we're updating, use the target context's filter, since we're |
|
342 | 343 | # moving to the target context. |
|
343 | 344 | sparsematch = matcher(repo, [mctx.rev()]) |
|
344 | 345 | |
|
345 | 346 | temporaryfiles = [] |
|
346 | 347 | for file, action in actions.iteritems(): |
|
347 | 348 | type, args, msg = action |
|
348 | 349 | files.add(file) |
|
349 | 350 | if sparsematch(file): |
|
350 | 351 | prunedactions[file] = action |
|
351 | 352 | elif type == 'm': |
|
352 | 353 | temporaryfiles.append(file) |
|
353 | 354 | prunedactions[file] = action |
|
354 | 355 | elif branchmerge: |
|
355 | 356 | if type != 'k': |
|
356 | 357 | temporaryfiles.append(file) |
|
357 | 358 | prunedactions[file] = action |
|
358 | 359 | elif type == 'f': |
|
359 | 360 | prunedactions[file] = action |
|
360 | 361 | elif file in wctx: |
|
361 | 362 | prunedactions[file] = ('r', args, msg) |
|
362 | 363 | |
|
363 | 364 | if branchmerge and type == mergemod.ACTION_MERGE: |
|
364 | 365 | f1, f2, fa, move, anc = args |
|
365 | 366 | if not sparsematch(f1): |
|
366 | 367 | temporaryfiles.append(f1) |
|
367 | 368 | |
|
368 | 369 | if len(temporaryfiles) > 0: |
|
369 | 370 | repo.ui.status(_('temporarily included %d file(s) in the sparse ' |
|
370 | 371 | 'checkout for merging\n') % len(temporaryfiles)) |
|
371 | 372 | addtemporaryincludes(repo, temporaryfiles) |
|
372 | 373 | |
|
373 | 374 | # Add the new files to the working copy so they can be merged, etc |
|
374 | 375 | actions = [] |
|
375 | 376 | message = 'temporarily adding to sparse checkout' |
|
376 | 377 | wctxmanifest = repo[None].manifest() |
|
377 | 378 | for file in temporaryfiles: |
|
378 | 379 | if file in wctxmanifest: |
|
379 | 380 | fctx = repo[None][file] |
|
380 | 381 | actions.append((file, (fctx.flags(), False), message)) |
|
381 | 382 | |
|
382 | 383 | typeactions = mergemod.emptyactions() |
|
383 | 384 | typeactions['g'] = actions |
|
384 | 385 | mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], |
|
385 | False) | |
|
386 | False, wantfiledata=False) | |
|
386 | 387 | |
|
387 | 388 | dirstate = repo.dirstate |
|
388 | 389 | for file, flags, msg in actions: |
|
389 | 390 | dirstate.normal(file) |
|
390 | 391 | |
|
391 | 392 | profiles = activeconfig(repo)[2] |
|
392 | 393 | changedprofiles = profiles & files |
|
393 | 394 | # If an active profile changed during the update, refresh the checkout. |
|
394 | 395 | # Don't do this during a branch merge, since all incoming changes should |
|
395 | 396 | # have been handled by the temporary includes above. |
|
396 | 397 | if changedprofiles and not branchmerge: |
|
397 | 398 | mf = mctx.manifest() |
|
398 | 399 | for file in mf: |
|
399 | 400 | old = oldsparsematch(file) |
|
400 | 401 | new = sparsematch(file) |
|
401 | 402 | if not old and new: |
|
402 | 403 | flags = mf.flags(file) |
|
403 | 404 | prunedactions[file] = ('g', (flags, False), '') |
|
404 | 405 | elif old and not new: |
|
405 | 406 | prunedactions[file] = ('r', [], '') |
|
406 | 407 | |
|
407 | 408 | return prunedactions |
|
408 | 409 | |
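The core pruning rule above, reduced to a toy (the merge and branch-merge special cases are omitted): actions inside the sparse set pass through; actions on files outside it become removes when the file is in the working copy, and are dropped otherwise.

    def prune(actions, sparsematch, wctxfiles):
        pruned = {}
        for f, (typ, args, msg) in actions.items():
            if sparsematch(f):
                pruned[f] = (typ, args, msg)
            elif f in wctxfiles:
                pruned[f] = ('r', args, msg)  # remove from disk, keep history
        return pruned

    acts = {'src/a': ('g', (), ''), 'docs/x': ('g', (), ''), 'docs/y': ('g', (), '')}
    print(prune(acts, lambda f: f.startswith('src/'), {'docs/x'}))
    # {'src/a': ('g', (), ''), 'docs/x': ('r', (), '')}   -- docs/y dropped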
|
409 | 410 | def refreshwdir(repo, origstatus, origsparsematch, force=False): |
|
410 | 411 | """Refreshes working directory by taking sparse config into account. |
|
411 | 412 | |
|
412 | 413 | The old status and sparse matcher are compared against the current sparse
|
413 | 414 | matcher. |
|
414 | 415 | |
|
415 | 416 | Will abort if a file with pending changes is being excluded or included |
|
416 | 417 | unless ``force`` is True. |
|
417 | 418 | """ |
|
418 | 419 | # Verify there are no pending changes |
|
419 | 420 | pending = set() |
|
420 | 421 | pending.update(origstatus.modified) |
|
421 | 422 | pending.update(origstatus.added) |
|
422 | 423 | pending.update(origstatus.removed) |
|
423 | 424 | sparsematch = matcher(repo) |
|
424 | 425 | abort = False |
|
425 | 426 | |
|
426 | 427 | for f in pending: |
|
427 | 428 | if not sparsematch(f): |
|
428 | 429 | repo.ui.warn(_("pending changes to '%s'\n") % f) |
|
429 | 430 | abort = not force |
|
430 | 431 | |
|
431 | 432 | if abort: |
|
432 | 433 | raise error.Abort(_('could not update sparseness due to pending ' |
|
433 | 434 | 'changes')) |
|
434 | 435 | |
|
435 | 436 | # Calculate actions |
|
436 | 437 | dirstate = repo.dirstate |
|
437 | 438 | ctx = repo['.'] |
|
438 | 439 | added = [] |
|
439 | 440 | lookup = [] |
|
440 | 441 | dropped = [] |
|
441 | 442 | mf = ctx.manifest() |
|
442 | 443 | files = set(mf) |
|
443 | 444 | |
|
444 | 445 | actions = {} |
|
445 | 446 | |
|
446 | 447 | for file in files: |
|
447 | 448 | old = origsparsematch(file) |
|
448 | 449 | new = sparsematch(file) |
|
449 | 450 | # Add files that are newly included, or that don't exist in |
|
450 | 451 | # the dirstate yet. |
|
451 | 452 | if (new and not old) or (old and new and not file in dirstate): |
|
452 | 453 | fl = mf.flags(file) |
|
453 | 454 | if repo.wvfs.exists(file): |
|
454 | 455 | actions[file] = ('e', (fl,), '') |
|
455 | 456 | lookup.append(file) |
|
456 | 457 | else: |
|
457 | 458 | actions[file] = ('g', (fl, False), '') |
|
458 | 459 | added.append(file) |
|
459 | 460 | # Drop files that are newly excluded, or that still exist in |
|
460 | 461 | # the dirstate. |
|
461 | 462 | elif (old and not new) or (not old and not new and file in dirstate): |
|
462 | 463 | dropped.append(file) |
|
463 | 464 | if file not in pending: |
|
464 | 465 | actions[file] = ('r', [], '') |
|
465 | 466 | |
|
466 | 467 | # Verify there are no pending changes in newly included files |
|
467 | 468 | abort = False |
|
468 | 469 | for file in lookup: |
|
469 | 470 | repo.ui.warn(_("pending changes to '%s'\n") % file) |
|
470 | 471 | abort = not force |
|
471 | 472 | if abort: |
|
472 | 473 | raise error.Abort(_('cannot change sparseness due to pending ' |
|
473 | 474 | 'changes (delete the files or use ' |
|
474 | 475 | '--force to bring them back dirty)')) |
|
475 | 476 | |
|
476 | 477 | # Check for files that were only in the dirstate. |
|
477 | 478 | for file, state in dirstate.iteritems(): |
|
478 | 479 | if not file in files: |
|
479 | 480 | old = origsparsematch(file) |
|
480 | 481 | new = sparsematch(file) |
|
481 | 482 | if old and not new: |
|
482 | 483 | dropped.append(file) |
|
483 | 484 | |
|
484 | 485 | # Apply changes to disk |
|
485 | 486 | typeactions = mergemod.emptyactions() |
|
486 | 487 | for f, (m, args, msg) in actions.iteritems(): |
|
487 | 488 | typeactions[m].append((f, args, msg)) |
|
488 | 489 | |
|
489 | mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False) |

490 | mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False, |

491 | wantfiledata=False) |
|
490 | 492 | |
|
491 | 493 | # Fix dirstate |
|
492 | 494 | for file in added: |
|
493 | 495 | dirstate.normal(file) |
|
494 | 496 | |
|
495 | 497 | for file in dropped: |
|
496 | 498 | dirstate.drop(file) |
|
497 | 499 | |
|
498 | 500 | for file in lookup: |
|
499 | 501 | # File exists on disk, and we're bringing it back in an unknown state. |
|
500 | 502 | dirstate.normallookup(file) |
|
501 | 503 | |
|
502 | 504 | return added, dropped, lookup |
|
503 | 505 | |
|
504 | 506 | def aftercommit(repo, node): |
|
505 | 507 | """Perform actions after a working directory commit.""" |
|
506 | 508 | # This function is called unconditionally, even if sparse isn't |
|
507 | 509 | # enabled. |
|
508 | 510 | ctx = repo[node] |
|
509 | 511 | |
|
510 | 512 | profiles = patternsforrev(repo, ctx.rev())[2] |
|
511 | 513 | |
|
512 | 514 | # profiles will only have data if sparse is enabled. |
|
513 | 515 | if profiles & set(ctx.files()): |
|
514 | 516 | origstatus = repo.status() |
|
515 | 517 | origsparsematch = matcher(repo) |
|
516 | 518 | refreshwdir(repo, origstatus, origsparsematch, force=True) |
|
517 | 519 | |
|
518 | 520 | prunetemporaryincludes(repo) |
|
519 | 521 | |
|
520 | 522 | def _updateconfigandrefreshwdir(repo, includes, excludes, profiles, |
|
521 | 523 | force=False, removing=False): |
|
522 | 524 | """Update the sparse config and working directory state.""" |
|
523 | 525 | raw = repo.vfs.tryread('sparse') |
|
524 | 526 | oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, 'sparse') |
|
525 | 527 | |
|
526 | 528 | oldstatus = repo.status() |
|
527 | 529 | oldmatch = matcher(repo) |
|
528 | 530 | oldrequires = set(repo.requirements) |
|
529 | 531 | |
|
530 | 532 | # TODO remove this try..except once the matcher integrates better |
|
531 | 533 | # with dirstate. We currently have to write the updated config |
|
532 | 534 | # because that will invalidate the matcher cache and force a |
|
533 | 535 | # re-read. We ideally want to update the cached matcher on the |
|
534 | 536 | # repo instance then flush the new config to disk once wdir is |
|
535 | 537 | # updated. But this requires massive rework to matcher() and its |
|
536 | 538 | # consumers. |
|
537 | 539 | |
|
538 | 540 | if 'exp-sparse' in oldrequires and removing: |
|
539 | 541 | repo.requirements.discard('exp-sparse') |
|
540 | 542 | scmutil.writerequires(repo.vfs, repo.requirements) |
|
541 | 543 | elif 'exp-sparse' not in oldrequires: |
|
542 | 544 | repo.requirements.add('exp-sparse') |
|
543 | 545 | scmutil.writerequires(repo.vfs, repo.requirements) |
|
544 | 546 | |
|
545 | 547 | try: |
|
546 | 548 | writeconfig(repo, includes, excludes, profiles) |
|
547 | 549 | return refreshwdir(repo, oldstatus, oldmatch, force=force) |
|
548 | 550 | except Exception: |
|
549 | 551 | if repo.requirements != oldrequires: |
|
550 | 552 | repo.requirements.clear() |
|
551 | 553 | repo.requirements |= oldrequires |
|
552 | 554 | scmutil.writerequires(repo.vfs, repo.requirements) |
|
553 | 555 | writeconfig(repo, oldincludes, oldexcludes, oldprofiles) |
|
554 | 556 | raise |
|
555 | 557 | |
|
556 | 558 | def clearrules(repo, force=False): |
|
557 | 559 | """Clears include/exclude rules from the sparse config. |
|
558 | 560 | |
|
559 | 561 | The remaining sparse config only has profiles, if defined. The working |
|
560 | 562 | directory is refreshed, as needed. |
|
561 | 563 | """ |
|
562 | 564 | with repo.wlock(): |
|
563 | 565 | raw = repo.vfs.tryread('sparse') |
|
564 | 566 | includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse') |
|
565 | 567 | |
|
566 | 568 | if not includes and not excludes: |
|
567 | 569 | return |
|
568 | 570 | |
|
569 | 571 | _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force) |
|
570 | 572 | |
|
571 | 573 | def importfromfiles(repo, opts, paths, force=False): |
|
572 | 574 | """Import sparse config rules from files. |
|
573 | 575 | |
|
574 | 576 | The updated sparse config is written out and the working directory |
|
575 | 577 | is refreshed, as needed. |
|
576 | 578 | """ |
|
577 | 579 | with repo.wlock(): |
|
578 | 580 | # read current configuration |
|
579 | 581 | raw = repo.vfs.tryread('sparse') |
|
580 | 582 | includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse') |
|
581 | 583 | aincludes, aexcludes, aprofiles = activeconfig(repo) |
|
582 | 584 | |
|
583 | 585 | # Import rules on top; only take in rules that are not yet |
|
584 | 586 | # part of the active rules. |
|
585 | 587 | changed = False |
|
586 | 588 | for p in paths: |
|
587 | 589 | with util.posixfile(util.expandpath(p), mode='rb') as fh: |
|
588 | 590 | raw = fh.read() |
|
589 | 591 | |
|
590 | 592 | iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw, |
|
591 | 593 | 'sparse') |
|
592 | 594 | oldsize = len(includes) + len(excludes) + len(profiles) |
|
593 | 595 | includes.update(iincludes - aincludes) |
|
594 | 596 | excludes.update(iexcludes - aexcludes) |
|
595 | 597 | profiles.update(iprofiles - aprofiles) |
|
596 | 598 | if len(includes) + len(excludes) + len(profiles) > oldsize: |
|
597 | 599 | changed = True |
|
598 | 600 | |
|
599 | 601 | profilecount = includecount = excludecount = 0 |
|
600 | 602 | fcounts = (0, 0, 0) |
|
601 | 603 | |
|
602 | 604 | if changed: |
|
603 | 605 | profilecount = len(profiles - aprofiles) |
|
604 | 606 | includecount = len(includes - aincludes) |
|
605 | 607 | excludecount = len(excludes - aexcludes) |
|
606 | 608 | |
|
607 | 609 | fcounts = map(len, _updateconfigandrefreshwdir( |
|
608 | 610 | repo, includes, excludes, profiles, force=force)) |
|
609 | 611 | |
|
610 | 612 | printchanges(repo.ui, opts, profilecount, includecount, excludecount, |
|
611 | 613 | *fcounts) |
|
612 | 614 | |
|
613 | 615 | def updateconfig(repo, pats, opts, include=False, exclude=False, reset=False, |
|
614 | 616 | delete=False, enableprofile=False, disableprofile=False, |
|
615 | 617 | force=False, usereporootpaths=False): |
|
616 | 618 | """Perform a sparse config update. |
|
617 | 619 | |
|
618 | 620 | Only one of the actions may be performed. |
|
619 | 621 | |
|
620 | 622 | The new config is written out and a working directory refresh is performed. |
|
621 | 623 | """ |
|
622 | 624 | with repo.wlock(): |
|
623 | 625 | raw = repo.vfs.tryread('sparse') |
|
624 | 626 | oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw, |
|
625 | 627 | 'sparse') |
|
626 | 628 | |
|
627 | 629 | if reset: |
|
628 | 630 | newinclude = set() |
|
629 | 631 | newexclude = set() |
|
630 | 632 | newprofiles = set() |
|
631 | 633 | else: |
|
632 | 634 | newinclude = set(oldinclude) |
|
633 | 635 | newexclude = set(oldexclude) |
|
634 | 636 | newprofiles = set(oldprofiles) |
|
635 | 637 | |
|
636 | 638 | if any(os.path.isabs(pat) for pat in pats): |
|
637 | 639 | raise error.Abort(_('paths cannot be absolute')) |
|
638 | 640 | |
|
639 | 641 | if not usereporootpaths: |
|
640 | 642 | # let's treat paths as relative to cwd |
|
641 | 643 | root, cwd = repo.root, repo.getcwd() |
|
642 | 644 | abspats = [] |
|
643 | 645 | for kindpat in pats: |
|
644 | 646 | kind, pat = matchmod._patsplit(kindpat, None) |
|
645 | 647 | if kind in matchmod.cwdrelativepatternkinds or kind is None: |
|
646 | 648 | ap = ((kind + ':' if kind else '') + |
|
647 | 649 | pathutil.canonpath(root, cwd, pat)) |
|
648 | 650 | abspats.append(ap) |
|
649 | 651 | else: |
|
650 | 652 | abspats.append(kindpat) |
|
651 | 653 | pats = abspats |
|
652 | 654 | |
|
653 | 655 | if include: |
|
654 | 656 | newinclude.update(pats) |
|
655 | 657 | elif exclude: |
|
656 | 658 | newexclude.update(pats) |
|
657 | 659 | elif enableprofile: |
|
658 | 660 | newprofiles.update(pats) |
|
659 | 661 | elif disableprofile: |
|
660 | 662 | newprofiles.difference_update(pats) |
|
661 | 663 | elif delete: |
|
662 | 664 | newinclude.difference_update(pats) |
|
663 | 665 | newexclude.difference_update(pats) |
|
664 | 666 | |
|
665 | 667 | profilecount = (len(newprofiles - oldprofiles) - |
|
666 | 668 | len(oldprofiles - newprofiles)) |
|
667 | 669 | includecount = (len(newinclude - oldinclude) - |
|
668 | 670 | len(oldinclude - newinclude)) |
|
669 | 671 | excludecount = (len(newexclude - oldexclude) - |
|
670 | 672 | len(oldexclude - newexclude)) |
|
671 | 673 | |
|
672 | 674 | fcounts = map(len, _updateconfigandrefreshwdir( |
|
673 | 675 | repo, newinclude, newexclude, newprofiles, force=force, |
|
674 | 676 | removing=reset)) |
|
675 | 677 | |
|
676 | 678 | printchanges(repo.ui, opts, profilecount, includecount, |
|
677 | 679 | excludecount, *fcounts) |
|
678 | 680 | |
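The three counts handed to printchanges() are signed deltas, e.g.:

    oldinclude, newinclude = {'a', 'b'}, {'b', 'c', 'd'}
    includecount = len(newinclude - oldinclude) - len(oldinclude - newinclude)
    # 2 added, 1 removed -> net +1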
|
679 | 681 | def printchanges(ui, opts, profilecount=0, includecount=0, excludecount=0, |
|
680 | 682 | added=0, dropped=0, conflicting=0): |
|
681 | 683 | """Print output summarizing sparse config changes.""" |
|
682 | 684 | with ui.formatter('sparse', opts) as fm: |
|
683 | 685 | fm.startitem() |
|
684 | 686 | fm.condwrite(ui.verbose, 'profiles_added', _('Profiles changed: %d\n'), |
|
685 | 687 | profilecount) |
|
686 | 688 | fm.condwrite(ui.verbose, 'include_rules_added', |
|
687 | 689 | _('Include rules changed: %d\n'), includecount) |
|
688 | 690 | fm.condwrite(ui.verbose, 'exclude_rules_added', |
|
689 | 691 | _('Exclude rules changed: %d\n'), excludecount) |
|
690 | 692 | |
|
691 | 693 | # In 'plain' verbose mode, mergemod.applyupdates already outputs what |
|
692 | 694 | # files are added or removed outside of the templating formatter |
|
693 | 695 | # framework. No point in repeating ourselves in that case. |
|
694 | 696 | if not fm.isplain(): |
|
695 | 697 | fm.condwrite(ui.verbose, 'files_added', _('Files added: %d\n'), |
|
696 | 698 | added) |
|
697 | 699 | fm.condwrite(ui.verbose, 'files_dropped', _('Files dropped: %d\n'), |
|
698 | 700 | dropped) |
|
699 | 701 | fm.condwrite(ui.verbose, 'files_conflicting', |
|
700 | 702 | _('Files conflicting: %d\n'), conflicting) |
@@ -1,39 +1,37 | |||
|
1 | 1 | Checking the size/permissions/file-type of files stored in the |
|
2 | 2 | dirstate after an update where the files are changed concurrently |
|
3 | 3 | outside of hg's control. |
|
4 | 4 | |
|
5 | 5 | $ hg init repo |
|
6 | 6 | $ cd repo |
|
7 | 7 | $ echo a > a |
|
8 | 8 | $ hg commit -qAm _ |
|
9 | 9 | $ echo aa > a |
|
10 | 10 | $ hg commit -m _ |
|
11 | 11 | |
|
12 | 12 | $ hg debugdirstate --no-dates |
|
13 | 13 | n 644 3 (set |unset) a (re) |
|
14 | 14 | |
|
15 | 15 | $ cat >> $TESTTMP/dirstaterace.py << EOF |
|
16 | 16 | > from mercurial import ( |
|
17 | 17 | > extensions, |
|
18 | 18 | > merge, |
|
19 | 19 | > ) |
|
20 | 20 | > def extsetup(ui): |
|
21 | 21 | > extensions.wrapfunction(merge, 'applyupdates', wrap) |
|
22 | 22 | > def wrap(orig, *args, **kwargs): |
|
23 | 23 | > res = orig(*args, **kwargs) |
|
24 | 24 | > with open("a", "w"): |
|
25 | 25 | > pass # just truncate the file |
|
26 | 26 | > return res |
|
27 | 27 | > EOF |
|
28 | 28 | |
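The test extension leans on extensions.wrapfunction(), whose wrapper receives the original callable as its first argument. A toy model of the mechanism (not mercurial's implementation):

    def wrapfunction(container, name, wrapper):
        origfn = getattr(container, name)
        def wrapped(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)
        setattr(container, name, wrapped)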
|
29 | 29 | Do an update where file 'a' is changed between hg writing it to disk |
|
30 | and hg writing the dirstate. It results in a corrupted dirstate, which | |
|
31 | stores the wrong size, and thus hg status shows spuriously modified | |
|
32 | files. | |
|
30 | and hg writing the dirstate. The dirstate is correct nonetheless, and | |
|
31 | so hg status correctly shows a as clean. | |
|
33 | 32 | |
|
34 | 33 | $ hg up -r 0 --config extensions.race=$TESTTMP/dirstaterace.py |
|
35 | 34 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
36 | 35 | $ hg debugdirstate --no-dates |
|
37 | n 644 0 (set |unset) a (re) |

36 | n 644 2 (set |unset) a (re) |
|
38 | 37 | $ echo a > a; hg status; hg diff |
|
39 | M a |
@@ -1,93 +1,93 | |||
|
1 | 1 | ------ Test dirstate._dirs refcounting |
|
2 | 2 | |
|
3 | 3 | $ hg init t |
|
4 | 4 | $ cd t |
|
5 | 5 | $ mkdir -p a/b/c/d |
|
6 | 6 | $ touch a/b/c/d/x |
|
7 | 7 | $ touch a/b/c/d/y |
|
8 | 8 | $ touch a/b/c/d/z |
|
9 | 9 | $ hg ci -Am m |
|
10 | 10 | adding a/b/c/d/x |
|
11 | 11 | adding a/b/c/d/y |
|
12 | 12 | adding a/b/c/d/z |
|
13 | 13 | $ hg mv a z |
|
14 | 14 | moving a/b/c/d/x to z/b/c/d/x |
|
15 | 15 | moving a/b/c/d/y to z/b/c/d/y |
|
16 | 16 | moving a/b/c/d/z to z/b/c/d/z |
|
17 | 17 | |
|
18 | 18 | Test name collisions |
|
19 | 19 | |
|
20 | 20 | $ rm z/b/c/d/x |
|
21 | 21 | $ mkdir z/b/c/d/x |
|
22 | 22 | $ touch z/b/c/d/x/y |
|
23 | 23 | $ hg add z/b/c/d/x/y |
|
24 | 24 | abort: file 'z/b/c/d/x' in dirstate clashes with 'z/b/c/d/x/y' |
|
25 | 25 | [255] |
|
26 | 26 | $ rm -rf z/b/c/d |
|
27 | 27 | $ touch z/b/c/d |
|
28 | 28 | $ hg add z/b/c/d |
|
29 | 29 | abort: directory 'z/b/c/d' already in dirstate |
|
30 | 30 | [255] |
|
31 | 31 | |
|
32 | 32 | $ cd .. |
|
33 | 33 | |
|
34 | 34 | Issue1790: dirstate entry locked into unset if file mtime is set into |
|
35 | 35 | the future |
|
36 | 36 | |
|
37 | 37 | Prepare test repo: |
|
38 | 38 | |
|
39 | 39 | $ hg init u |
|
40 | 40 | $ cd u |
|
41 | 41 | $ echo a > a |
|
42 | 42 | $ hg add |
|
43 | 43 | adding a |
|
44 | 44 | $ hg ci -m1 |
|
45 | 45 | |
|
46 | 46 | Set mtime of a into the future: |
|
47 | 47 | |
|
48 | 48 | $ touch -t 202101011200 a |
|
49 | 49 | |
|
50 | 50 | Status must not set a's entry to unset (issue1790): |
|
51 | 51 | |
|
52 | 52 | $ hg status |
|
53 | 53 | $ hg debugstate |
|
54 | 54 | n 644 2 2021-01-01 12:00:00 a |
|
55 | 55 | |
|
56 | 56 | Test modulo storage/comparison of absurd dates: |
|
57 | 57 | |
|
58 | 58 | #if no-aix |
|
59 | 59 | $ touch -t 195001011200 a |
|
60 | 60 | $ hg st |
|
61 | 61 | $ hg debugstate |
|
62 | 62 | n 644 2 2018-01-19 15:14:08 a |
|
63 | 63 | #endif |
|
64 | 64 | |
|
65 | 65 | Verify that exceptions during a dirstate change leave the dirstate |
|
66 | 66 | coherent (issue4353) |
|
67 | 67 | |
|
68 | 68 | $ cat > ../dirstateexception.py <<EOF |
|
69 | 69 | > from __future__ import absolute_import |
|
70 | 70 | > from mercurial import ( |
|
71 | 71 | > error, |
|
72 | 72 | > extensions, |
|
73 | 73 | > merge, |
|
74 | 74 | > ) |
|
75 | 75 | > |
|
76 | > def wraprecordupdates(orig, repo, actions, branchmerge): |

76 | > def wraprecordupdates(*args): |
|
77 | 77 | > raise error.Abort("simulated error while recording dirstateupdates") |
|
78 | 78 | > |
|
79 | 79 | > def reposetup(ui, repo): |
|
80 | 80 | > extensions.wrapfunction(merge, 'recordupdates', wraprecordupdates) |
|
81 | 81 | > EOF |
|
82 | 82 | |
|
83 | 83 | $ hg rm a |
|
84 | 84 | $ hg commit -m 'rm a' |
|
85 | 85 | $ echo "[extensions]" >> .hg/hgrc |
|
86 | 86 | $ echo "dirstateex=../dirstateexception.py" >> .hg/hgrc |
|
87 | 87 | $ hg up 0 |
|
88 | 88 | abort: simulated error while recording dirstateupdates |
|
89 | 89 | [255] |
|
90 | 90 | $ hg log -r . -T '{rev}\n' |
|
91 | 91 | 1 |
|
92 | 92 | $ hg status |
|
93 | 93 | ? a |