##// END OF EJS Templates
status: update various other methods to return new class
Martin von Zweigbergk -
r22914:c95db320 default
parent child Browse files
Show More
@@ -1,1329 +1,1330 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset
15 archival, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22 import basestore
22 import basestore
23
23
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25
25
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        match = oldmatch(ctx, pats, opts, globbed, default)
        m = copy.copy(match)
        def notlfile(f):
            # "normal" means: not a standin itself, and no standin for it
            # in the given manifest
            return not (lfutil.isstandin(f) or lfutil.standin(f) in manifest)
        m._files = [f for f in m._files if notlfile(f)]
        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
        return m
    oldmatch = installmatchfn(overridematch)
41
41
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    # Remember the current function on the replacement itself so that
    # restorematchfn can undo the patching later.
    oldmatch = scmutil.match
    f.oldmatch = oldmatch
    scmutil.match = f
    return oldmatch
49
49
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore matchfn to reverse'''
    # Supply the current function as the getattr default so this really
    # is a no-op (as documented) when scmutil.match has not been monkey
    # patched; the previous two-argument getattr raised AttributeError
    # instead.  This also matches restorematchandpatsfn below.
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
57
57
def installmatchandpatsfn(f):
    # Monkey patch scmutil.matchandpats, stashing the original on the
    # replacement so restorematchandpatsfn can undo the patching.
    # Same warning as installmatchfn: not thread safe.
    oldmatchandpats = scmutil.matchandpats
    f.oldmatchandpats = oldmatchandpats
    scmutil.matchandpats = f
    return oldmatchandpats
63
63
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. no-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore matchfn to reverse'''
    # The current function is used as the getattr default, so nothing
    # changes when scmutil.matchandpats has not been monkey patched.
    current = scmutil.matchandpats
    scmutil.matchandpats = getattr(current, 'oldmatchandpats', current)
73
73
def addlargefiles(ui, repo, *pats, **opts):
    '''add matching files as largefiles and create their standins

    A file is treated as a largefile when --large was given, when its
    size is at least the configured minimum size (in MiB), or when it
    matches one of the configured largefiles patterns.  Returns the list
    of matched files that could not be added.'''
    large = opts.pop('large', None)
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = scmutil.match(repo[None], pats, opts)
    # silence the matcher's bad-file callback; presumably the normal add
    # code reports those itself -- TODO confirm
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        # tracked as a largefile if its standin is in the working context
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            wfile = repo.wjoin(f)

            # In case the file was removed previously, but not committed
            # (issue3507)
            if not os.path.exists(wfile):
                continue

            # lfsize is in MiB; compare against the on-disk size
            abovemin = (lfsize and
                        os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # the standin starts out empty; its hash is filled in at
                # commit time
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    # previously removed: re-adding, not a fresh add
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # report explicitly requested files whose standin could not
            # be added
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]
    finally:
        wlock.release()
    return bad
142
142
def removelargefiles(ui, repo, *pats, **opts):
    '''remove matching largefiles and their standins

    Warns about (and refuses to remove) modified, added, or still-present
    files as the core remove command does.  Returns 1 if any file was
    refused, otherwise 0.'''
    after = opts.get('after')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        # make repo.status largefile-aware while we gather state
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # status tuple indices: 0=modified, 1=added, 3=deleted, 6=clean;
    # keep only files that actually are largefiles (i.e. have a standin
    # in the working manifest).  NOTE: 'list' shadows the builtin here.
    modified, added, deleted, clean = [[f for f in list
                                       if lfutil.standin(f) in manifest]
                                      for list in [s[0], s[1], s[3], s[6]]]

    def warn(files, msg):
        # warn per file; returns 1 if anything was warned about
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    for f in sorted(remove):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                # If this is being called by addremove, notify the user that we
                # are removing the file.
                if getattr(repo, "_isaddremove", False):
                    ui.status(_('removing %s\n') % f)
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            lfdirstate.remove(f)
        lfdirstate.write()
        # from here on, operate on the standins rather than the largefiles
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not getattr(repo, "_isaddremove", False):
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            repo[None].forget(remove)
    finally:
        wlock.release()

    return result
205
205
206 # For overriding mercurial.hgweb.webcommands so that largefiles will
206 # For overriding mercurial.hgweb.webcommands so that largefiles will
207 # appear at their right place in the manifests.
207 # appear at their right place in the manifests.
def decodepath(orig, path):
    # Present a standin under its plain largefile name; any other path
    # is returned untouched.
    lfile = lfutil.splitstandin(path)
    if lfile:
        return lfile
    return path
210
210
211 # -- Wrappers: modify existing commands --------------------------------
211 # -- Wrappers: modify existing commands --------------------------------
212
212
213 # Add works by going through the files that the user wanted to add and
213 # Add works by going through the files that the user wanted to add and
214 # checking if they should be added as largefiles. Then it makes a new
214 # checking if they should be added as largefiles. Then it makes a new
215 # matcher which matches only the normal files and runs the original
215 # matcher which matches only the normal files and runs the original
216 # version of add.
216 # version of add.
def overrideadd(orig, ui, repo, *pats, **opts):
    '''wrap the add command: add matching files as largefiles first,
    then run the original add on the remaining normal files only.

    Returns 1 if either the largefile add or the original add reported a
    problem, otherwise 0.'''
    normal = opts.pop('normal')
    if normal:
        if opts.get('large'):
            raise util.Abort(_('--normal cannot be used with --large'))
        return orig(ui, repo, *pats, **opts)
    bad = addlargefiles(ui, repo, *pats, **opts)
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, *pats, **opts)
    finally:
        # undo the module-level monkey patching even if orig raises,
        # as overridelog does; previously an exception here left
        # scmutil.match patched for the rest of the process
        restorematchfn()

    return 1 if (result == 1 or bad) else 0
229
229
def overrideremove(orig, ui, repo, *pats, **opts):
    '''wrap the remove command: remove normal files with the original
    command, then remove the matching largefiles'''
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, *pats, **opts)
    finally:
        # undo the module-level monkey patching even if orig raises,
        # as overridelog does; previously an exception here left
        # scmutil.match patched for the rest of the process
        restorematchfn()
    return removelargefiles(ui, repo, *pats, **opts) or result
235
235
def overridestatusfn(orig, repo, rev2, **opts):
    # Run the wrapped status with largefile-aware status enabled on the
    # underlying repo, restoring the flag afterwards.
    repo._repo.lfstatus = True
    try:
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
242
242
def overridestatus(orig, ui, repo, *pats, **opts):
    # Run the original status command with largefile-aware status
    # enabled, restoring the flag afterwards.
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
249
249
def overridedirty(orig, repo, ignoreupdate=False):
    # Evaluate dirtiness with largefile-aware status enabled on the
    # underlying repo, restoring the flag afterwards.
    repo._repo.lfstatus = True
    try:
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
256
256
def overridelog(orig, ui, repo, *pats, **opts):
    '''wrap the log command so largefiles are matched under both their
    plain names and their standins'''
    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)
        # TODO: handling of patterns in both cases below
        if m._cwd:
            if os.path.isabs(m._cwd):
                # TODO: handle largefile magic when invoked from other cwd
                return matchandpats
            # relative path from cwd back up to the repo root, so standin
            # patterns resolve from the root
            back = (m._cwd.count('/') + 1) * '../'
            pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
        else:
            pats.update(lfutil.standin(f) for f in p)

        for i in range(0, len(m._files)):
            standin = lfutil.standin(m._files[i])
            if standin in repo[ctx.node()]:
                # the file is a largefile in this revision: match its
                # standin instead
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()]:
                # the file is in neither form: match both names
                m._files.append(standin)
                pats.add(standin)

        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # a standin matches when its stripped name would have matched
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts):
        pctx = repo[None]
        match, pats = oldmatchandpats(pctx, pats, opts)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # always undo both monkey patches
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
324
324
def overrideverify(orig, ui, repo, *pats, **opts):
    '''wrap the verify command: optionally also verify largefiles
    (--large), in all revisions (--lfa), and/or their contents (--lfc)'''
    large = opts.pop('large', False)
    lfall = opts.pop('lfa', False)  # renamed: 'all' shadowed the builtin
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or lfall or contents:
        result = result or lfcommands.verifylfiles(ui, repo, lfall, contents)
    return result
334
334
def overridedebugstate(orig, ui, repo, *pats, **opts):
    # With --large, show the largefiles dirstate instead of the normal
    # one by handing the original command a stand-in repo object.
    if opts.pop('large', False):
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
343
343
344 # Override needs to refresh standins so that update's normal merge
344 # Override needs to refresh standins so that update's normal merge
345 # will go through properly. Then the other update hook (overriding repo.update)
345 # will go through properly. Then the other update hook (overriding repo.update)
346 # will get the new files. Filemerge is also overridden so that the merge
346 # will get the new files. Filemerge is also overridden so that the merge
347 # will merge standins correctly.
347 # will merge standins correctly.
def overrideupdate(orig, ui, repo, *pats, **opts):
    '''wrap the update command: refresh the largefiles dirstate first so
    the normal update sees correct state, and honor --check against
    modified largefiles'''
    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
            [], False, False, False)
        # s[0] is the "modified" list of the status result
        modified = s[0]

        if opts['check']:
            mod = len(modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                # compare the hash recorded in the standin against the
                # on-disk largefile to resolve "unsure" entries
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted changes'))
        return orig(ui, repo, *pats, **opts)
    finally:
        wlock.release()
373
373
374 # Before starting the manifest merge, merge.updates will call
374 # Before starting the manifest merge, merge.updates will call
375 # _checkunknown to check if there are any files in the merged-in
375 # _checkunknown to check if there are any files in the merged-in
376 # changeset that collide with unknown files in the working copy.
376 # changeset that collide with unknown files in the working copy.
377 #
377 #
378 # The largefiles are seen as unknown, so this prevents us from merging
378 # The largefiles are seen as unknown, so this prevents us from merging
379 # in a file 'foo' if we already have a largefile with the same name.
379 # in a file 'foo' if we already have a largefile with the same name.
380 #
380 #
381 # The overridden function filters the unknown files by removing any
381 # The overridden function filters the unknown files by removing any
382 # largefiles. This makes the merge proceed and we can then handle this
382 # largefiles. This makes the merge proceed and we can then handle this
383 # case further in the overridden manifestmerge function below.
383 # case further in the overridden manifestmerge function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
    # A file whose standin is in the working context is a largefile,
    # not a colliding unknown file -- never report it as one.
    normalized = repo.dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        return False
    return origfn(repo, wctx, mctx, f)
388
388
389 # The manifest merge handles conflicts on the manifest level. We want
389 # The manifest merge handles conflicts on the manifest level. We want
390 # to handle changes in largefile-ness of files at this level too.
390 # to handle changes in largefile-ness of files at this level too.
391 #
391 #
392 # The strategy is to run the original manifestmerge and then process
392 # The strategy is to run the original manifestmerge and then process
393 # the action list it outputs. There are two cases we need to deal with:
393 # the action list it outputs. There are two cases we need to deal with:
394 #
394 #
395 # 1. Normal file in p1, largefile in p2. Here the largefile is
395 # 1. Normal file in p1, largefile in p2. Here the largefile is
396 # detected via its standin file, which will enter the working copy
396 # detected via its standin file, which will enter the working copy
397 # with a "get" action. It is not "merge" since the standin is all
397 # with a "get" action. It is not "merge" since the standin is all
398 # Mercurial is concerned with at this level -- the link to the
398 # Mercurial is concerned with at this level -- the link to the
399 # existing normal file is not relevant here.
399 # existing normal file is not relevant here.
400 #
400 #
401 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
401 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
402 # since the largefile will be present in the working copy and
402 # since the largefile will be present in the working copy and
403 # different from the normal file in p2. Mercurial therefore
403 # different from the normal file in p2. Mercurial therefore
404 # triggers a merge action.
404 # triggers a merge action.
405 #
405 #
406 # In both cases, we prompt the user and emit new actions to either
406 # In both cases, we prompt the user and emit new actions to either
407 # remove the standin (if the normal file was kept) or to remove the
407 # remove the standin (if the normal file was kept) or to remove the
408 # normal file and get the standin (if the largefile was kept). The
408 # normal file and get the standin (if the largefile was kept). The
409 # default prompt answer is to use the largefile version since it was
409 # default prompt answer is to use the largefile version since it was
410 # presumably changed on purpose.
410 # presumably changed on purpose.
411 #
411 #
412 # Finally, the merge.applyupdates function will then take care of
412 # Finally, the merge.applyupdates function will then take care of
413 # writing the files into the working copy and lfcommands.updatelfiles
413 # writing the files into the working copy and lfcommands.updatelfiles
414 # will update the largefiles.
414 # will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    '''wrap merge.calculateupdates: rewrite the action lists to handle
    files that changed largefile-ness between the two parents, prompting
    the user to choose which version to keep (see the comment above)'''
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
                     acceptremote, followcopies)

    if overwrite:
        # plain overwrite: no conflict handling needed
        return actions

    # files already scheduled for removal
    removes = set(a[0] for a in actions['r'])

    newglist = []
    lfmr = [] # LargeFiles: Mark as Removed
    for action in actions['g']:
        f, args, msg = action
        splitstandin = f and lfutil.splitstandin(f)
        if (splitstandin is not None and
            splitstandin in p1 and splitstandin not in removes):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('remote turned local normal file %s into a largefile\n'
                    'use (l)argefile or keep (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                # keep the largefile: drop the normal file, get the standin
                actions['r'].append((lfile, None, msg))
                newglist.append((standin, (p2.flags(standin),), msg))
            else:
                # keep the normal file: drop the standin
                actions['r'].append((standin, None, msg))
        elif lfutil.standin(f) in p1 and lfutil.standin(f) not in removes:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('remote turned local largefile %s into a normal file\n'
                    'keep (l)argefile or use (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions['r'].append((lfile, None, msg))
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    lfmr.append((lfile, None, msg))

                    # linear-merge should treat this largefile as 're-added'
                    actions['a'].append((standin, None, msg))
            else:
                # use the normal file: drop the standin, get the file
                actions['r'].append((standin, None, msg))
                newglist.append((lfile, (p2.flags(lfile),), msg))
        else:
            newglist.append(action)

    newglist.sort()
    actions['g'] = newglist
    if lfmr:
        lfmr.sort()
        actions['lfmr'] = lfmr

    return actions
477
477
def mergerecordupdates(orig, repo, actions, branchmerge):
    """Record merge actions, first marking 'lfmr' largefiles as removed.

    Largefiles queued under the 'lfmr' key ("LargeFiles: Mark as
    Removed") must hit the dirstate before the wrapped recorder runs,
    so that their 'remove' precedes every other recorded action.
    """
    for lfile, args, msg in actions.get('lfmr', []):
        repo.dirstate.remove(lfile)

    return orig(repo, actions, branchmerge)
486
486
487
487
488 # Override filemerge to prompt the user about how they wish to merge
488 # Override filemerge to prompt the user about how they wish to merge
489 # largefiles. This will handle identical edits without prompting the user.
489 # largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
    """Merge a largefile standin.

    Identical edits on both sides are resolved silently; a real
    conflict prompts the user to keep the local or take the other
    largefile.  Non-standin files fall through to the wrapped merge.
    """
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if ohash != ahash and ohash != dhash:
        # The other side changed the largefile and it differs from ours.
        if dhash == ahash:
            # Our side is unchanged from the ancestor: take theirs
            # without bothering the user.
            takeother = True
        else:
            # Both sides changed: let the user pick.
            prompt = _('largefile %s has a merge conflict\nancestor was %s\n'
                       'keep (l)ocal %s or\ntake (o)ther %s?'
                       '$$ &Largefile $$ &Normal file') \
                if False else \
                _('largefile %s has a merge conflict\nancestor was %s\n'
                  'keep (l)ocal %s or\ntake (o)ther %s?'
                  '$$ &Local $$ &Other') % \
                (lfutil.splitstandin(orig), ahash, dhash, ohash)
            takeother = repo.ui.promptchoice(prompt, 0) == 1
        if takeother:
            repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0
508
508
509 # Copy first changes the matchers to match standins instead of
509 # Copy first changes the matchers to match standins instead of
510 # largefiles. Then it overrides util.copyfile in that function it
510 # largefiles. Then it overrides util.copyfile in that function it
511 # checks if the destination largefile already exists. It also keeps a
511 # checks if the destination largefile already exists. It also keeps a
512 # list of copied files so that the largefiles can be copied and the
512 # list of copied files so that the largefiles can be copied and the
513 # dirstate updated.
513 # dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename files, handling largefiles via their standins.

    Runs the wrapped copy twice: once with a matcher restricted to
    normal files, then again with the matcher (and ``util.copyfile``)
    rewritten to operate on standins.  Copied largefiles are recorded
    so the real largefile contents can be copied/moved and the
    largefile dirstate updated afterwards.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        # Map a working-directory-relative path to the absolute path of
        # its standin file.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        try:
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # "no files to copy" is tolerated here: the largefile pass
            # below may still find something.
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                # Restrict the matcher to files that have a standin in
                # the working-copy manifest, and make it match the
                # standin paths themselves.
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                # Temporarily replace util.copyfile so we both record
                # which standins were copied and refuse to clobber an
                # existing largefile without --force.
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    # Derive largefile paths from the standin paths.
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                        # The file is gone, but this deletes any empty parent
                        # directories as a side-effect.
                        util.unlinkpath(repo.wjoin(srclfile), True)
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))

                    lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # Only abort when neither pass found anything to copy.
    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result
646
646
647 # When the user calls revert, we have to be careful to not revert any
647 # When the user calls revert, we have to be careful to not revert any
648 # changes to other largefiles accidentally. This means we have to keep
648 # changes to other largefiles accidentally. This means we have to keep
649 # track of the largefiles that are being reverted so we only pull down
649 # track of the largefiles that are being reverted so we only pull down
650 # the necessary largefiles.
650 # the necessary largefiles.
651 #
651 #
652 # Standins are only updated (to match the hash of largefiles) before
652 # Standins are only updated (to match the hash of largefiles) before
653 # commits. Update the standins then run the original revert, changing
653 # commits. Update the standins then run the original revert, changing
654 # the matcher to hit standins instead of largefiles. Based on the
654 # the matcher to hit standins instead of largefiles. Based on the
655 # resulting standins update the largefiles.
655 # resulting standins update the largefiles.
656 def overriderevert(orig, ui, repo, *pats, **opts):
656 def overriderevert(orig, ui, repo, *pats, **opts):
657 # Because we put the standins in a bad state (by updating them)
657 # Because we put the standins in a bad state (by updating them)
658 # and then return them to a correct state we need to lock to
658 # and then return them to a correct state we need to lock to
659 # prevent others from changing them in their incorrect state.
659 # prevent others from changing them in their incorrect state.
660 wlock = repo.wlock()
660 wlock = repo.wlock()
661 try:
661 try:
662 lfdirstate = lfutil.openlfdirstate(ui, repo)
662 lfdirstate = lfutil.openlfdirstate(ui, repo)
663 (modified, added, removed, missing, unknown, ignored, clean) = \
663 (modified, added, removed, missing, unknown, ignored, clean) = \
664 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
664 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
665 lfdirstate.write()
665 lfdirstate.write()
666 for lfile in modified:
666 for lfile in modified:
667 lfutil.updatestandin(repo, lfutil.standin(lfile))
667 lfutil.updatestandin(repo, lfutil.standin(lfile))
668 for lfile in missing:
668 for lfile in missing:
669 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
669 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
670 os.unlink(repo.wjoin(lfutil.standin(lfile)))
670 os.unlink(repo.wjoin(lfutil.standin(lfile)))
671
671
672 oldstandins = lfutil.getstandinsstate(repo)
672 oldstandins = lfutil.getstandinsstate(repo)
673
673
674 def overridematch(ctx, pats=[], opts={}, globbed=False,
674 def overridematch(ctx, pats=[], opts={}, globbed=False,
675 default='relpath'):
675 default='relpath'):
676 match = oldmatch(ctx, pats, opts, globbed, default)
676 match = oldmatch(ctx, pats, opts, globbed, default)
677 m = copy.copy(match)
677 m = copy.copy(match)
678 def tostandin(f):
678 def tostandin(f):
679 if lfutil.standin(f) in ctx:
679 if lfutil.standin(f) in ctx:
680 return lfutil.standin(f)
680 return lfutil.standin(f)
681 elif lfutil.standin(f) in repo[None]:
681 elif lfutil.standin(f) in repo[None]:
682 return None
682 return None
683 return f
683 return f
684 m._files = [tostandin(f) for f in m._files]
684 m._files = [tostandin(f) for f in m._files]
685 m._files = [f for f in m._files if f is not None]
685 m._files = [f for f in m._files if f is not None]
686 m._fmap = set(m._files)
686 m._fmap = set(m._files)
687 m._always = False
687 m._always = False
688 origmatchfn = m.matchfn
688 origmatchfn = m.matchfn
689 def matchfn(f):
689 def matchfn(f):
690 if lfutil.isstandin(f):
690 if lfutil.isstandin(f):
691 return (origmatchfn(lfutil.splitstandin(f)) and
691 return (origmatchfn(lfutil.splitstandin(f)) and
692 (f in repo[None] or f in ctx))
692 (f in repo[None] or f in ctx))
693 return origmatchfn(f)
693 return origmatchfn(f)
694 m.matchfn = matchfn
694 m.matchfn = matchfn
695 return m
695 return m
696 oldmatch = installmatchfn(overridematch)
696 oldmatch = installmatchfn(overridematch)
697 try:
697 try:
698 orig(ui, repo, *pats, **opts)
698 orig(ui, repo, *pats, **opts)
699 finally:
699 finally:
700 restorematchfn()
700 restorematchfn()
701
701
702 newstandins = lfutil.getstandinsstate(repo)
702 newstandins = lfutil.getstandinsstate(repo)
703 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
703 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
704 # lfdirstate should be 'normallookup'-ed for updated files,
704 # lfdirstate should be 'normallookup'-ed for updated files,
705 # because reverting doesn't touch dirstate for 'normal' files
705 # because reverting doesn't touch dirstate for 'normal' files
706 # when target revision is explicitly specified: in such case,
706 # when target revision is explicitly specified: in such case,
707 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
707 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
708 # of target (standin) file.
708 # of target (standin) file.
709 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
709 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
710 normallookup=True)
710 normallookup=True)
711
711
712 finally:
712 finally:
713 wlock.release()
713 wlock.release()
714
714
715 # When we rebase a repository with remotely changed largefiles, we need to
715 # When we rebase a repository with remotely changed largefiles, we need to
716 # take some extra care so that the largefiles are correctly updated in the
716 # take some extra care so that the largefiles are correctly updated in the
717 # working copy
717 # working copy
def overridepull(orig, ui, repo, source=None, **opts):
    """Pull, then cache largefiles for the revisions that arrived.

    With --rebase, runs pull+rebase itself (suppressing the normal
    post-incoming hook) while repo._isrebasing is set.  Afterwards,
    largefiles for the revsets in --lfrev (plus 'pulled()' when
    --all-largefiles is given) are downloaded into the cache.
    """
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            # Suppress the usual post-pull update; rebase below will
            # move the working copy instead.
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # firstpulled is only meaningful while evaluating --lfrev.
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
762
762
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    # A sentinel distinguishes "attribute absent" from any legitimate
    # value (including 0 for a previously-empty repo).
    unset = object()
    firstpulled = getattr(repo, 'firstpulled', unset)
    if firstpulled is unset:
        raise util.Abort(_("pulled() only available in --lfrev"))
    return revset.baseset([r for r in subset if r >= firstpulled])
788
788
def overrideclone(orig, ui, source, dest=None, **opts):
    """Reject --all-largefiles clones to non-local destinations, then
    delegate to the wrapped clone command."""
    resolved = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(resolved):
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            resolved)

    return orig(ui, source, dest, **opts)
799
799
def hgclone(orig, ui, opts, *args, **kwargs):
    """Clone, then optionally pre-download all largefiles.

    Returns the wrapped clone's (sourcerepo, destrepo) pair, or None
    when the clone failed or some requested largefile was missing.
    """
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return None

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point. The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get('all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo, None)
        if missing != 0:
            # Signal failure when any largefile could not be fetched.
            return None

    return result
817
817
818 def overriderebase(orig, ui, repo, **opts):
818 def overriderebase(orig, ui, repo, **opts):
819 repo._isrebasing = True
819 repo._isrebasing = True
820 try:
820 try:
821 return orig(ui, repo, **opts)
821 return orig(ui, repo, **opts)
822 finally:
822 finally:
823 repo._isrebasing = False
823 repo._isrebasing = False
824
824
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    """Archive a revision, substituting real largefile contents for
    standins.

    Reimplements the core of archival.archive: every standin in the
    archived context is replaced by the corresponding largefile read
    from the store or system cache; anything else is written as-is.
    """
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Skip files excluded by the caller's matcher; decode (e.g.
        # EOL-convert) data the same way a working-copy write would.
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            # Standard .hg_archival.txt contents: repo/node/branch plus
            # either global tags or latesttag/latesttagdistance.
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # Resolve the standin to the real largefile on disk; the
            # archive entry uses the largefile name, not the standin's.
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(repo.ui, archiver, prefix, submatch)

    archiver.done()
903
903
904 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
904 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
905 repo._get(repo._state + ('hg',))
905 repo._get(repo._state + ('hg',))
906 rev = repo._state[1]
906 rev = repo._state[1]
907 ctx = repo._repo[rev]
907 ctx = repo._repo[rev]
908
908
909 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
909 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
910
910
911 def write(name, mode, islink, getdata):
911 def write(name, mode, islink, getdata):
912 # At this point, the standin has been replaced with the largefile name,
912 # At this point, the standin has been replaced with the largefile name,
913 # so the normal matcher works here without the lfutil variants.
913 # so the normal matcher works here without the lfutil variants.
914 if match and not match(f):
914 if match and not match(f):
915 return
915 return
916 data = getdata()
916 data = getdata()
917
917
918 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
918 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
919
919
920 for f in ctx:
920 for f in ctx:
921 ff = ctx.flags(f)
921 ff = ctx.flags(f)
922 getdata = ctx[f].data
922 getdata = ctx[f].data
923 if lfutil.isstandin(f):
923 if lfutil.isstandin(f):
924 path = lfutil.findfile(repo._repo, getdata().strip())
924 path = lfutil.findfile(repo._repo, getdata().strip())
925 if path is None:
925 if path is None:
926 raise util.Abort(
926 raise util.Abort(
927 _('largefile %s not found in repo store or system cache')
927 _('largefile %s not found in repo store or system cache')
928 % lfutil.splitstandin(f))
928 % lfutil.splitstandin(f))
929 f = lfutil.splitstandin(f)
929 f = lfutil.splitstandin(f)
930
930
931 def getdatafn():
931 def getdatafn():
932 fd = None
932 fd = None
933 try:
933 try:
934 fd = open(os.path.join(prefix, path), 'rb')
934 fd = open(os.path.join(prefix, path), 'rb')
935 return fd.read()
935 return fd.read()
936 finally:
936 finally:
937 if fd:
937 if fd:
938 fd.close()
938 fd.close()
939
939
940 getdata = getdatafn
940 getdata = getdatafn
941
941
942 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
942 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
943
943
944 for subpath in sorted(ctx.substate):
944 for subpath in sorted(ctx.substate):
945 sub = ctx.sub(subpath)
945 sub = ctx.sub(subpath)
946 submatch = match_.narrowmatcher(subpath, match)
946 submatch = match_.narrowmatcher(subpath, match)
947 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
947 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
948 submatch)
948 submatch)
949
949
950 # If a largefile is modified, the change is not reflected in its
950 # If a largefile is modified, the change is not reflected in its
951 # standin until a commit. cmdutil.bailifchanged() raises an exception
951 # standin until a commit. cmdutil.bailifchanged() raises an exception
952 # if the repo has uncommitted changes. Wrap it to also check if
952 # if the repo has uncommitted changes. Wrap it to also check if
953 # largefiles were changed. This is used by bisect and backout.
953 # largefiles were changed. This is used by bisect and backout.
954 def overridebailifchanged(orig, repo):
954 def overridebailifchanged(orig, repo):
955 orig(repo)
955 orig(repo)
956 repo.lfstatus = True
956 repo.lfstatus = True
957 modified, added, removed, deleted = repo.status()[:4]
957 modified, added, removed, deleted = repo.status()[:4]
958 repo.lfstatus = False
958 repo.lfstatus = False
959 if modified or added or removed or deleted:
959 if modified or added or removed or deleted:
960 raise util.Abort(_('uncommitted changes'))
960 raise util.Abort(_('uncommitted changes'))
961
961
962 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
962 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
963 def overridefetch(orig, ui, repo, *pats, **opts):
963 def overridefetch(orig, ui, repo, *pats, **opts):
964 repo.lfstatus = True
964 repo.lfstatus = True
965 modified, added, removed, deleted = repo.status()[:4]
965 modified, added, removed, deleted = repo.status()[:4]
966 repo.lfstatus = False
966 repo.lfstatus = False
967 if modified or added or removed or deleted:
967 if modified or added or removed or deleted:
968 raise util.Abort(_('uncommitted changes'))
968 raise util.Abort(_('uncommitted changes'))
969 return orig(ui, repo, *pats, **opts)
969 return orig(ui, repo, *pats, **opts)
970
970
971 def overrideforget(orig, ui, repo, *pats, **opts):
971 def overrideforget(orig, ui, repo, *pats, **opts):
972 installnormalfilesmatchfn(repo[None].manifest())
972 installnormalfilesmatchfn(repo[None].manifest())
973 result = orig(ui, repo, *pats, **opts)
973 result = orig(ui, repo, *pats, **opts)
974 restorematchfn()
974 restorematchfn()
975 m = scmutil.match(repo[None], pats, opts)
975 m = scmutil.match(repo[None], pats, opts)
976
976
977 try:
977 try:
978 repo.lfstatus = True
978 repo.lfstatus = True
979 s = repo.status(match=m, clean=True)
979 s = repo.status(match=m, clean=True)
980 finally:
980 finally:
981 repo.lfstatus = False
981 repo.lfstatus = False
982 forget = sorted(s[0] + s[1] + s[3] + s[6])
982 forget = sorted(s[0] + s[1] + s[3] + s[6])
983 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
983 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
984
984
985 for f in forget:
985 for f in forget:
986 if lfutil.standin(f) not in repo.dirstate and not \
986 if lfutil.standin(f) not in repo.dirstate and not \
987 os.path.isdir(m.rel(lfutil.standin(f))):
987 os.path.isdir(m.rel(lfutil.standin(f))):
988 ui.warn(_('not removing %s: file is already untracked\n')
988 ui.warn(_('not removing %s: file is already untracked\n')
989 % m.rel(f))
989 % m.rel(f))
990 result = 1
990 result = 1
991
991
992 for f in forget:
992 for f in forget:
993 if ui.verbose or not m.exact(f):
993 if ui.verbose or not m.exact(f):
994 ui.status(_('removing %s\n') % m.rel(f))
994 ui.status(_('removing %s\n') % m.rel(f))
995
995
996 # Need to lock because standin files are deleted then removed from the
996 # Need to lock because standin files are deleted then removed from the
997 # repository and we could race in-between.
997 # repository and we could race in-between.
998 wlock = repo.wlock()
998 wlock = repo.wlock()
999 try:
999 try:
1000 lfdirstate = lfutil.openlfdirstate(ui, repo)
1000 lfdirstate = lfutil.openlfdirstate(ui, repo)
1001 for f in forget:
1001 for f in forget:
1002 if lfdirstate[f] == 'a':
1002 if lfdirstate[f] == 'a':
1003 lfdirstate.drop(f)
1003 lfdirstate.drop(f)
1004 else:
1004 else:
1005 lfdirstate.remove(f)
1005 lfdirstate.remove(f)
1006 lfdirstate.write()
1006 lfdirstate.write()
1007 standins = [lfutil.standin(f) for f in forget]
1007 standins = [lfutil.standin(f) for f in forget]
1008 for f in standins:
1008 for f in standins:
1009 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1009 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1010 repo[None].forget(standins)
1010 repo[None].forget(standins)
1011 finally:
1011 finally:
1012 wlock.release()
1012 wlock.release()
1013
1013
1014 return result
1014 return result
1015
1015
1016 def _getoutgoings(repo, other, missing, addfunc):
1016 def _getoutgoings(repo, other, missing, addfunc):
1017 """get pairs of filename and largefile hash in outgoing revisions
1017 """get pairs of filename and largefile hash in outgoing revisions
1018 in 'missing'.
1018 in 'missing'.
1019
1019
1020 largefiles already existing on 'other' repository are ignored.
1020 largefiles already existing on 'other' repository are ignored.
1021
1021
1022 'addfunc' is invoked with each unique pairs of filename and
1022 'addfunc' is invoked with each unique pairs of filename and
1023 largefile hash value.
1023 largefile hash value.
1024 """
1024 """
1025 knowns = set()
1025 knowns = set()
1026 lfhashes = set()
1026 lfhashes = set()
1027 def dedup(fn, lfhash):
1027 def dedup(fn, lfhash):
1028 k = (fn, lfhash)
1028 k = (fn, lfhash)
1029 if k not in knowns:
1029 if k not in knowns:
1030 knowns.add(k)
1030 knowns.add(k)
1031 lfhashes.add(lfhash)
1031 lfhashes.add(lfhash)
1032 lfutil.getlfilestoupload(repo, missing, dedup)
1032 lfutil.getlfilestoupload(repo, missing, dedup)
1033 if lfhashes:
1033 if lfhashes:
1034 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1034 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1035 for fn, lfhash in knowns:
1035 for fn, lfhash in knowns:
1036 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1036 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1037 addfunc(fn, lfhash)
1037 addfunc(fn, lfhash)
1038
1038
1039 def outgoinghook(ui, repo, other, opts, missing):
1039 def outgoinghook(ui, repo, other, opts, missing):
1040 if opts.pop('large', None):
1040 if opts.pop('large', None):
1041 lfhashes = set()
1041 lfhashes = set()
1042 if ui.debugflag:
1042 if ui.debugflag:
1043 toupload = {}
1043 toupload = {}
1044 def addfunc(fn, lfhash):
1044 def addfunc(fn, lfhash):
1045 if fn not in toupload:
1045 if fn not in toupload:
1046 toupload[fn] = []
1046 toupload[fn] = []
1047 toupload[fn].append(lfhash)
1047 toupload[fn].append(lfhash)
1048 lfhashes.add(lfhash)
1048 lfhashes.add(lfhash)
1049 def showhashes(fn):
1049 def showhashes(fn):
1050 for lfhash in sorted(toupload[fn]):
1050 for lfhash in sorted(toupload[fn]):
1051 ui.debug(' %s\n' % (lfhash))
1051 ui.debug(' %s\n' % (lfhash))
1052 else:
1052 else:
1053 toupload = set()
1053 toupload = set()
1054 def addfunc(fn, lfhash):
1054 def addfunc(fn, lfhash):
1055 toupload.add(fn)
1055 toupload.add(fn)
1056 lfhashes.add(lfhash)
1056 lfhashes.add(lfhash)
1057 def showhashes(fn):
1057 def showhashes(fn):
1058 pass
1058 pass
1059 _getoutgoings(repo, other, missing, addfunc)
1059 _getoutgoings(repo, other, missing, addfunc)
1060
1060
1061 if not toupload:
1061 if not toupload:
1062 ui.status(_('largefiles: no files to upload\n'))
1062 ui.status(_('largefiles: no files to upload\n'))
1063 else:
1063 else:
1064 ui.status(_('largefiles to upload (%d entities):\n')
1064 ui.status(_('largefiles to upload (%d entities):\n')
1065 % (len(lfhashes)))
1065 % (len(lfhashes)))
1066 for file in sorted(toupload):
1066 for file in sorted(toupload):
1067 ui.status(lfutil.splitstandin(file) + '\n')
1067 ui.status(lfutil.splitstandin(file) + '\n')
1068 showhashes(file)
1068 showhashes(file)
1069 ui.status('\n')
1069 ui.status('\n')
1070
1070
1071 def summaryremotehook(ui, repo, opts, changes):
1071 def summaryremotehook(ui, repo, opts, changes):
1072 largeopt = opts.get('large', False)
1072 largeopt = opts.get('large', False)
1073 if changes is None:
1073 if changes is None:
1074 if largeopt:
1074 if largeopt:
1075 return (False, True) # only outgoing check is needed
1075 return (False, True) # only outgoing check is needed
1076 else:
1076 else:
1077 return (False, False)
1077 return (False, False)
1078 elif largeopt:
1078 elif largeopt:
1079 url, branch, peer, outgoing = changes[1]
1079 url, branch, peer, outgoing = changes[1]
1080 if peer is None:
1080 if peer is None:
1081 # i18n: column positioning for "hg summary"
1081 # i18n: column positioning for "hg summary"
1082 ui.status(_('largefiles: (no remote repo)\n'))
1082 ui.status(_('largefiles: (no remote repo)\n'))
1083 return
1083 return
1084
1084
1085 toupload = set()
1085 toupload = set()
1086 lfhashes = set()
1086 lfhashes = set()
1087 def addfunc(fn, lfhash):
1087 def addfunc(fn, lfhash):
1088 toupload.add(fn)
1088 toupload.add(fn)
1089 lfhashes.add(lfhash)
1089 lfhashes.add(lfhash)
1090 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1090 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1091
1091
1092 if not toupload:
1092 if not toupload:
1093 # i18n: column positioning for "hg summary"
1093 # i18n: column positioning for "hg summary"
1094 ui.status(_('largefiles: (no files to upload)\n'))
1094 ui.status(_('largefiles: (no files to upload)\n'))
1095 else:
1095 else:
1096 # i18n: column positioning for "hg summary"
1096 # i18n: column positioning for "hg summary"
1097 ui.status(_('largefiles: %d entities for %d files to upload\n')
1097 ui.status(_('largefiles: %d entities for %d files to upload\n')
1098 % (len(lfhashes), len(toupload)))
1098 % (len(lfhashes), len(toupload)))
1099
1099
1100 def overridesummary(orig, ui, repo, *pats, **opts):
1100 def overridesummary(orig, ui, repo, *pats, **opts):
1101 try:
1101 try:
1102 repo.lfstatus = True
1102 repo.lfstatus = True
1103 orig(ui, repo, *pats, **opts)
1103 orig(ui, repo, *pats, **opts)
1104 finally:
1104 finally:
1105 repo.lfstatus = False
1105 repo.lfstatus = False
1106
1106
1107 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1107 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1108 similarity=None):
1108 similarity=None):
1109 if not lfutil.islfilesrepo(repo):
1109 if not lfutil.islfilesrepo(repo):
1110 return orig(repo, pats, opts, dry_run, similarity)
1110 return orig(repo, pats, opts, dry_run, similarity)
1111 # Get the list of missing largefiles so we can remove them
1111 # Get the list of missing largefiles so we can remove them
1112 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1112 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1113 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1113 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1114 False, False, False)
1114 False, False, False)
1115 missing = s[3]
1115 missing = s[3]
1116
1116
1117 # Call into the normal remove code, but the removing of the standin, we want
1117 # Call into the normal remove code, but the removing of the standin, we want
1118 # to have handled by original addremove. Monkey patching here makes sure
1118 # to have handled by original addremove. Monkey patching here makes sure
1119 # we don't remove the standin in the largefiles code, preventing a very
1119 # we don't remove the standin in the largefiles code, preventing a very
1120 # confused state later.
1120 # confused state later.
1121 if missing:
1121 if missing:
1122 m = [repo.wjoin(f) for f in missing]
1122 m = [repo.wjoin(f) for f in missing]
1123 repo._isaddremove = True
1123 repo._isaddremove = True
1124 removelargefiles(repo.ui, repo, *m, **opts)
1124 removelargefiles(repo.ui, repo, *m, **opts)
1125 repo._isaddremove = False
1125 repo._isaddremove = False
1126 # Call into the normal add code, and any files that *should* be added as
1126 # Call into the normal add code, and any files that *should* be added as
1127 # largefiles will be
1127 # largefiles will be
1128 addlargefiles(repo.ui, repo, *pats, **opts)
1128 addlargefiles(repo.ui, repo, *pats, **opts)
1129 # Now that we've handled largefiles, hand off to the original addremove
1129 # Now that we've handled largefiles, hand off to the original addremove
1130 # function to take care of the rest. Make sure it doesn't do anything with
1130 # function to take care of the rest. Make sure it doesn't do anything with
1131 # largefiles by installing a matcher that will ignore them.
1131 # largefiles by installing a matcher that will ignore them.
1132 installnormalfilesmatchfn(repo[None].manifest())
1132 installnormalfilesmatchfn(repo[None].manifest())
1133 result = orig(repo, pats, opts, dry_run, similarity)
1133 result = orig(repo, pats, opts, dry_run, similarity)
1134 restorematchfn()
1134 restorematchfn()
1135 return result
1135 return result
1136
1136
1137 # Calling purge with --all will cause the largefiles to be deleted.
1137 # Calling purge with --all will cause the largefiles to be deleted.
1138 # Override repo.status to prevent this from happening.
1138 # Override repo.status to prevent this from happening.
1139 def overridepurge(orig, ui, repo, *dirs, **opts):
1139 def overridepurge(orig, ui, repo, *dirs, **opts):
1140 # XXX large file status is buggy when used on repo proxy.
1140 # XXX large file status is buggy when used on repo proxy.
1141 # XXX this needs to be investigate.
1141 # XXX this needs to be investigate.
1142 repo = repo.unfiltered()
1142 repo = repo.unfiltered()
1143 oldstatus = repo.status
1143 oldstatus = repo.status
1144 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1144 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1145 clean=False, unknown=False, listsubrepos=False):
1145 clean=False, unknown=False, listsubrepos=False):
1146 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1146 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1147 listsubrepos)
1147 listsubrepos)
1148 lfdirstate = lfutil.openlfdirstate(ui, repo)
1148 lfdirstate = lfutil.openlfdirstate(ui, repo)
1149 modified, added, removed, deleted, unknown, ignored, clean = r
1149 modified, added, removed, deleted, unknown, ignored, clean = r
1150 unknown = [f for f in unknown if lfdirstate[f] == '?']
1150 unknown = [f for f in unknown if lfdirstate[f] == '?']
1151 ignored = [f for f in ignored if lfdirstate[f] == '?']
1151 ignored = [f for f in ignored if lfdirstate[f] == '?']
1152 return modified, added, removed, deleted, unknown, ignored, clean
1152 return scmutil.status(modified, added, removed, deleted,
1153 unknown, ignored, clean)
1153 repo.status = overridestatus
1154 repo.status = overridestatus
1154 orig(ui, repo, *dirs, **opts)
1155 orig(ui, repo, *dirs, **opts)
1155 repo.status = oldstatus
1156 repo.status = oldstatus
1156
1157
1157 def overriderollback(orig, ui, repo, **opts):
1158 def overriderollback(orig, ui, repo, **opts):
1158 wlock = repo.wlock()
1159 wlock = repo.wlock()
1159 try:
1160 try:
1160 before = repo.dirstate.parents()
1161 before = repo.dirstate.parents()
1161 orphans = set(f for f in repo.dirstate
1162 orphans = set(f for f in repo.dirstate
1162 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1163 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1163 result = orig(ui, repo, **opts)
1164 result = orig(ui, repo, **opts)
1164 after = repo.dirstate.parents()
1165 after = repo.dirstate.parents()
1165 if before == after:
1166 if before == after:
1166 return result # no need to restore standins
1167 return result # no need to restore standins
1167
1168
1168 pctx = repo['.']
1169 pctx = repo['.']
1169 for f in repo.dirstate:
1170 for f in repo.dirstate:
1170 if lfutil.isstandin(f):
1171 if lfutil.isstandin(f):
1171 orphans.discard(f)
1172 orphans.discard(f)
1172 if repo.dirstate[f] == 'r':
1173 if repo.dirstate[f] == 'r':
1173 repo.wvfs.unlinkpath(f, ignoremissing=True)
1174 repo.wvfs.unlinkpath(f, ignoremissing=True)
1174 elif f in pctx:
1175 elif f in pctx:
1175 fctx = pctx[f]
1176 fctx = pctx[f]
1176 repo.wwrite(f, fctx.data(), fctx.flags())
1177 repo.wwrite(f, fctx.data(), fctx.flags())
1177 else:
1178 else:
1178 # content of standin is not so important in 'a',
1179 # content of standin is not so important in 'a',
1179 # 'm' or 'n' (coming from the 2nd parent) cases
1180 # 'm' or 'n' (coming from the 2nd parent) cases
1180 lfutil.writestandin(repo, f, '', False)
1181 lfutil.writestandin(repo, f, '', False)
1181 for standin in orphans:
1182 for standin in orphans:
1182 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1183 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1183
1184
1184 lfdirstate = lfutil.openlfdirstate(ui, repo)
1185 lfdirstate = lfutil.openlfdirstate(ui, repo)
1185 orphans = set(lfdirstate)
1186 orphans = set(lfdirstate)
1186 lfiles = lfutil.listlfiles(repo)
1187 lfiles = lfutil.listlfiles(repo)
1187 for file in lfiles:
1188 for file in lfiles:
1188 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1189 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1189 orphans.discard(file)
1190 orphans.discard(file)
1190 for lfile in orphans:
1191 for lfile in orphans:
1191 lfdirstate.drop(lfile)
1192 lfdirstate.drop(lfile)
1192 lfdirstate.write()
1193 lfdirstate.write()
1193 finally:
1194 finally:
1194 wlock.release()
1195 wlock.release()
1195 return result
1196 return result
1196
1197
1197 def overridetransplant(orig, ui, repo, *revs, **opts):
1198 def overridetransplant(orig, ui, repo, *revs, **opts):
1198 try:
1199 try:
1199 oldstandins = lfutil.getstandinsstate(repo)
1200 oldstandins = lfutil.getstandinsstate(repo)
1200 repo._istransplanting = True
1201 repo._istransplanting = True
1201 result = orig(ui, repo, *revs, **opts)
1202 result = orig(ui, repo, *revs, **opts)
1202 newstandins = lfutil.getstandinsstate(repo)
1203 newstandins = lfutil.getstandinsstate(repo)
1203 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1204 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1204 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1205 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1205 printmessage=True)
1206 printmessage=True)
1206 finally:
1207 finally:
1207 repo._istransplanting = False
1208 repo._istransplanting = False
1208 return result
1209 return result
1209
1210
1210 def overridecat(orig, ui, repo, file1, *pats, **opts):
1211 def overridecat(orig, ui, repo, file1, *pats, **opts):
1211 ctx = scmutil.revsingle(repo, opts.get('rev'))
1212 ctx = scmutil.revsingle(repo, opts.get('rev'))
1212 err = 1
1213 err = 1
1213 notbad = set()
1214 notbad = set()
1214 m = scmutil.match(ctx, (file1,) + pats, opts)
1215 m = scmutil.match(ctx, (file1,) + pats, opts)
1215 origmatchfn = m.matchfn
1216 origmatchfn = m.matchfn
1216 def lfmatchfn(f):
1217 def lfmatchfn(f):
1217 if origmatchfn(f):
1218 if origmatchfn(f):
1218 return True
1219 return True
1219 lf = lfutil.splitstandin(f)
1220 lf = lfutil.splitstandin(f)
1220 if lf is None:
1221 if lf is None:
1221 return False
1222 return False
1222 notbad.add(lf)
1223 notbad.add(lf)
1223 return origmatchfn(lf)
1224 return origmatchfn(lf)
1224 m.matchfn = lfmatchfn
1225 m.matchfn = lfmatchfn
1225 origbadfn = m.bad
1226 origbadfn = m.bad
1226 def lfbadfn(f, msg):
1227 def lfbadfn(f, msg):
1227 if not f in notbad:
1228 if not f in notbad:
1228 origbadfn(f, msg)
1229 origbadfn(f, msg)
1229 m.bad = lfbadfn
1230 m.bad = lfbadfn
1230 for f in ctx.walk(m):
1231 for f in ctx.walk(m):
1231 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1232 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1232 pathname=f)
1233 pathname=f)
1233 lf = lfutil.splitstandin(f)
1234 lf = lfutil.splitstandin(f)
1234 if lf is None or origmatchfn(f):
1235 if lf is None or origmatchfn(f):
1235 # duplicating unreachable code from commands.cat
1236 # duplicating unreachable code from commands.cat
1236 data = ctx[f].data()
1237 data = ctx[f].data()
1237 if opts.get('decode'):
1238 if opts.get('decode'):
1238 data = repo.wwritedata(f, data)
1239 data = repo.wwritedata(f, data)
1239 fp.write(data)
1240 fp.write(data)
1240 else:
1241 else:
1241 hash = lfutil.readstandin(repo, lf, ctx.rev())
1242 hash = lfutil.readstandin(repo, lf, ctx.rev())
1242 if not lfutil.inusercache(repo.ui, hash):
1243 if not lfutil.inusercache(repo.ui, hash):
1243 store = basestore._openstore(repo)
1244 store = basestore._openstore(repo)
1244 success, missing = store.get([(lf, hash)])
1245 success, missing = store.get([(lf, hash)])
1245 if len(success) != 1:
1246 if len(success) != 1:
1246 raise util.Abort(
1247 raise util.Abort(
1247 _('largefile %s is not in cache and could not be '
1248 _('largefile %s is not in cache and could not be '
1248 'downloaded') % lf)
1249 'downloaded') % lf)
1249 path = lfutil.usercachepath(repo.ui, hash)
1250 path = lfutil.usercachepath(repo.ui, hash)
1250 fpin = open(path, "rb")
1251 fpin = open(path, "rb")
1251 for chunk in util.filechunkiter(fpin, 128 * 1024):
1252 for chunk in util.filechunkiter(fpin, 128 * 1024):
1252 fp.write(chunk)
1253 fp.write(chunk)
1253 fpin.close()
1254 fpin.close()
1254 fp.close()
1255 fp.close()
1255 err = 0
1256 err = 0
1256 return err
1257 return err
1257
1258
1258 def mercurialsinkbefore(orig, sink):
1259 def mercurialsinkbefore(orig, sink):
1259 sink.repo._isconverting = True
1260 sink.repo._isconverting = True
1260 orig(sink)
1261 orig(sink)
1261
1262
1262 def mercurialsinkafter(orig, sink):
1263 def mercurialsinkafter(orig, sink):
1263 sink.repo._isconverting = False
1264 sink.repo._isconverting = False
1264 orig(sink)
1265 orig(sink)
1265
1266
1266 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1267 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1267 *args, **kwargs):
1268 *args, **kwargs):
1268 wlock = repo.wlock()
1269 wlock = repo.wlock()
1269 try:
1270 try:
1270 # branch | | |
1271 # branch | | |
1271 # merge | force | partial | action
1272 # merge | force | partial | action
1272 # -------+-------+---------+--------------
1273 # -------+-------+---------+--------------
1273 # x | x | x | linear-merge
1274 # x | x | x | linear-merge
1274 # o | x | x | branch-merge
1275 # o | x | x | branch-merge
1275 # x | o | x | overwrite (as clean update)
1276 # x | o | x | overwrite (as clean update)
1276 # o | o | x | force-branch-merge (*1)
1277 # o | o | x | force-branch-merge (*1)
1277 # x | x | o | (*)
1278 # x | x | o | (*)
1278 # o | x | o | (*)
1279 # o | x | o | (*)
1279 # x | o | o | overwrite (as revert)
1280 # x | o | o | overwrite (as revert)
1280 # o | o | o | (*)
1281 # o | o | o | (*)
1281 #
1282 #
1282 # (*) don't care
1283 # (*) don't care
1283 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1284 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1284
1285
1285 linearmerge = not branchmerge and not force and not partial
1286 linearmerge = not branchmerge and not force and not partial
1286
1287
1287 if linearmerge or (branchmerge and force and not partial):
1288 if linearmerge or (branchmerge and force and not partial):
1288 # update standins for linear-merge or force-branch-merge,
1289 # update standins for linear-merge or force-branch-merge,
1289 # because largefiles in the working directory may be modified
1290 # because largefiles in the working directory may be modified
1290 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1291 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1291 unsure, s = lfdirstate.status(match_.always(repo.root,
1292 unsure, s = lfdirstate.status(match_.always(repo.root,
1292 repo.getcwd()),
1293 repo.getcwd()),
1293 [], False, False, False)
1294 [], False, False, False)
1294 modified, added = s[:2]
1295 modified, added = s[:2]
1295 for lfile in unsure + modified + added:
1296 for lfile in unsure + modified + added:
1296 lfutil.updatestandin(repo, lfutil.standin(lfile))
1297 lfutil.updatestandin(repo, lfutil.standin(lfile))
1297
1298
1298 if linearmerge:
1299 if linearmerge:
1299 # Only call updatelfiles on the standins that have changed
1300 # Only call updatelfiles on the standins that have changed
1300 # to save time
1301 # to save time
1301 oldstandins = lfutil.getstandinsstate(repo)
1302 oldstandins = lfutil.getstandinsstate(repo)
1302
1303
1303 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1304 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1304
1305
1305 filelist = None
1306 filelist = None
1306 if linearmerge:
1307 if linearmerge:
1307 newstandins = lfutil.getstandinsstate(repo)
1308 newstandins = lfutil.getstandinsstate(repo)
1308 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1309 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1309
1310
1310 # suppress status message while automated committing
1311 # suppress status message while automated committing
1311 printmessage = not (getattr(repo, "_isrebasing", False) or
1312 printmessage = not (getattr(repo, "_isrebasing", False) or
1312 getattr(repo, "_istransplanting", False))
1313 getattr(repo, "_istransplanting", False))
1313 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1314 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1314 printmessage=printmessage,
1315 printmessage=printmessage,
1315 normallookup=partial)
1316 normallookup=partial)
1316
1317
1317 return result
1318 return result
1318 finally:
1319 finally:
1319 wlock.release()
1320 wlock.release()
1320
1321
1321 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1322 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1322 result = orig(repo, files, *args, **kwargs)
1323 result = orig(repo, files, *args, **kwargs)
1323
1324
1324 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1325 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1325 if filelist:
1326 if filelist:
1326 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1327 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1327 printmessage=False, normallookup=True)
1328 printmessage=False, normallookup=True)
1328
1329
1329 return result
1330 return result
@@ -1,484 +1,484 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import os
11 import os
12
12
13 from mercurial import error, manifest, match as match_, util
13 from mercurial import error, manifest, match as match_, util
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial import localrepo
15 from mercurial import localrepo, scmutil
16
16
17 import lfcommands
17 import lfcommands
18 import lfutil
18 import lfutil
19
19
20 def reposetup(ui, repo):
20 def reposetup(ui, repo):
21 # wire repositories should be given new wireproto functions
21 # wire repositories should be given new wireproto functions
22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
23 if not repo.local():
23 if not repo.local():
24 return
24 return
25
25
26 class lfilesrepo(repo.__class__):
26 class lfilesrepo(repo.__class__):
27 lfstatus = False
27 lfstatus = False
28 def status_nolfiles(self, *args, **kwargs):
28 def status_nolfiles(self, *args, **kwargs):
29 return super(lfilesrepo, self).status(*args, **kwargs)
29 return super(lfilesrepo, self).status(*args, **kwargs)
30
30
31 # When lfstatus is set, return a context that gives the names
31 # When lfstatus is set, return a context that gives the names
32 # of largefiles instead of their corresponding standins and
32 # of largefiles instead of their corresponding standins and
33 # identifies the largefiles as always binary, regardless of
33 # identifies the largefiles as always binary, regardless of
34 # their actual contents.
34 # their actual contents.
35 def __getitem__(self, changeid):
35 def __getitem__(self, changeid):
36 ctx = super(lfilesrepo, self).__getitem__(changeid)
36 ctx = super(lfilesrepo, self).__getitem__(changeid)
37 if self.lfstatus:
37 if self.lfstatus:
38 class lfilesmanifestdict(manifest.manifestdict):
38 class lfilesmanifestdict(manifest.manifestdict):
39 def __contains__(self, filename):
39 def __contains__(self, filename):
40 orig = super(lfilesmanifestdict, self).__contains__
40 orig = super(lfilesmanifestdict, self).__contains__
41 return orig(filename) or orig(lfutil.standin(filename))
41 return orig(filename) or orig(lfutil.standin(filename))
42 class lfilesctx(ctx.__class__):
42 class lfilesctx(ctx.__class__):
43 def files(self):
43 def files(self):
44 filenames = super(lfilesctx, self).files()
44 filenames = super(lfilesctx, self).files()
45 return [lfutil.splitstandin(f) or f for f in filenames]
45 return [lfutil.splitstandin(f) or f for f in filenames]
46 def manifest(self):
46 def manifest(self):
47 man1 = super(lfilesctx, self).manifest()
47 man1 = super(lfilesctx, self).manifest()
48 man1.__class__ = lfilesmanifestdict
48 man1.__class__ = lfilesmanifestdict
49 return man1
49 return man1
50 def filectx(self, path, fileid=None, filelog=None):
50 def filectx(self, path, fileid=None, filelog=None):
51 orig = super(lfilesctx, self).filectx
51 orig = super(lfilesctx, self).filectx
52 try:
52 try:
53 if filelog is not None:
53 if filelog is not None:
54 result = orig(path, fileid, filelog)
54 result = orig(path, fileid, filelog)
55 else:
55 else:
56 result = orig(path, fileid)
56 result = orig(path, fileid)
57 except error.LookupError:
57 except error.LookupError:
58 # Adding a null character will cause Mercurial to
58 # Adding a null character will cause Mercurial to
59 # identify this as a binary file.
59 # identify this as a binary file.
60 if filelog is not None:
60 if filelog is not None:
61 result = orig(lfutil.standin(path), fileid,
61 result = orig(lfutil.standin(path), fileid,
62 filelog)
62 filelog)
63 else:
63 else:
64 result = orig(lfutil.standin(path), fileid)
64 result = orig(lfutil.standin(path), fileid)
65 olddata = result.data
65 olddata = result.data
66 result.data = lambda: olddata() + '\0'
66 result.data = lambda: olddata() + '\0'
67 return result
67 return result
68 ctx.__class__ = lfilesctx
68 ctx.__class__ = lfilesctx
69 return ctx
69 return ctx
70
70
71 # Figure out the status of big files and insert them into the
71 # Figure out the status of big files and insert them into the
72 # appropriate list in the result. Also removes standin files
72 # appropriate list in the result. Also removes standin files
73 # from the listing. Revert to the original status if
73 # from the listing. Revert to the original status if
74 # self.lfstatus is False.
74 # self.lfstatus is False.
75 # XXX large file status is buggy when used on repo proxy.
75 # XXX large file status is buggy when used on repo proxy.
76 # XXX this needs to be investigated.
76 # XXX this needs to be investigated.
77 @localrepo.unfilteredmethod
77 @localrepo.unfilteredmethod
78 def status(self, node1='.', node2=None, match=None, ignored=False,
78 def status(self, node1='.', node2=None, match=None, ignored=False,
79 clean=False, unknown=False, listsubrepos=False):
79 clean=False, unknown=False, listsubrepos=False):
80 listignored, listclean, listunknown = ignored, clean, unknown
80 listignored, listclean, listunknown = ignored, clean, unknown
81 orig = super(lfilesrepo, self).status
81 orig = super(lfilesrepo, self).status
82 if not self.lfstatus:
82 if not self.lfstatus:
83 return orig(node1, node2, match, listignored, listclean,
83 return orig(node1, node2, match, listignored, listclean,
84 listunknown, listsubrepos)
84 listunknown, listsubrepos)
85
85
86 # some calls in this function rely on the old version of status
86 # some calls in this function rely on the old version of status
87 self.lfstatus = False
87 self.lfstatus = False
88 ctx1 = self[node1]
88 ctx1 = self[node1]
89 ctx2 = self[node2]
89 ctx2 = self[node2]
90 working = ctx2.rev() is None
90 working = ctx2.rev() is None
91 parentworking = working and ctx1 == self['.']
91 parentworking = working and ctx1 == self['.']
92
92
93 def inctx(file, ctx):
93 def inctx(file, ctx):
94 try:
94 try:
95 if ctx.rev() is None:
95 if ctx.rev() is None:
96 return file in ctx.manifest()
96 return file in ctx.manifest()
97 ctx[file]
97 ctx[file]
98 return True
98 return True
99 except KeyError:
99 except KeyError:
100 return False
100 return False
101
101
102 if match is None:
102 if match is None:
103 match = match_.always(self.root, self.getcwd())
103 match = match_.always(self.root, self.getcwd())
104
104
105 wlock = None
105 wlock = None
106 try:
106 try:
107 try:
107 try:
108 # updating the dirstate is optional
108 # updating the dirstate is optional
109 # so we don't wait on the lock
109 # so we don't wait on the lock
110 wlock = self.wlock(False)
110 wlock = self.wlock(False)
111 except error.LockError:
111 except error.LockError:
112 pass
112 pass
113
113
114 # First check if there were files specified on the
114 # First check if there were files specified on the
115 # command line. If there were, and none of them were
115 # command line. If there were, and none of them were
116 # largefiles, we should just bail here and let super
116 # largefiles, we should just bail here and let super
117 # handle it -- thus gaining a big performance boost.
117 # handle it -- thus gaining a big performance boost.
118 lfdirstate = lfutil.openlfdirstate(ui, self)
118 lfdirstate = lfutil.openlfdirstate(ui, self)
119 if match.files() and not match.anypats():
119 if match.files() and not match.anypats():
120 for f in lfdirstate:
120 for f in lfdirstate:
121 if match(f):
121 if match(f):
122 break
122 break
123 else:
123 else:
124 return orig(node1, node2, match, listignored, listclean,
124 return orig(node1, node2, match, listignored, listclean,
125 listunknown, listsubrepos)
125 listunknown, listsubrepos)
126
126
127 # Create a copy of match that matches standins instead
127 # Create a copy of match that matches standins instead
128 # of largefiles.
128 # of largefiles.
129 def tostandins(files):
129 def tostandins(files):
130 if not working:
130 if not working:
131 return files
131 return files
132 newfiles = []
132 newfiles = []
133 dirstate = self.dirstate
133 dirstate = self.dirstate
134 for f in files:
134 for f in files:
135 sf = lfutil.standin(f)
135 sf = lfutil.standin(f)
136 if sf in dirstate:
136 if sf in dirstate:
137 newfiles.append(sf)
137 newfiles.append(sf)
138 elif sf in dirstate.dirs():
138 elif sf in dirstate.dirs():
139 # Directory entries could be regular or
139 # Directory entries could be regular or
140 # standin, check both
140 # standin, check both
141 newfiles.extend((f, sf))
141 newfiles.extend((f, sf))
142 else:
142 else:
143 newfiles.append(f)
143 newfiles.append(f)
144 return newfiles
144 return newfiles
145
145
146 m = copy.copy(match)
146 m = copy.copy(match)
147 m._files = tostandins(m._files)
147 m._files = tostandins(m._files)
148
148
149 result = orig(node1, node2, m, ignored, clean, unknown,
149 result = orig(node1, node2, m, ignored, clean, unknown,
150 listsubrepos)
150 listsubrepos)
151 if working:
151 if working:
152
152
153 def sfindirstate(f):
153 def sfindirstate(f):
154 sf = lfutil.standin(f)
154 sf = lfutil.standin(f)
155 dirstate = self.dirstate
155 dirstate = self.dirstate
156 return sf in dirstate or sf in dirstate.dirs()
156 return sf in dirstate or sf in dirstate.dirs()
157
157
158 match._files = [f for f in match._files
158 match._files = [f for f in match._files
159 if sfindirstate(f)]
159 if sfindirstate(f)]
160 # Don't waste time getting the ignored and unknown
160 # Don't waste time getting the ignored and unknown
161 # files from lfdirstate
161 # files from lfdirstate
162 unsure, s = lfdirstate.status(match, [], False, listclean,
162 unsure, s = lfdirstate.status(match, [], False, listclean,
163 False)
163 False)
164 (modified, added, removed, missing, _unknown, _ignored,
164 (modified, added, removed, missing, _unknown, _ignored,
165 clean) = s
165 clean) = s
166 if parentworking:
166 if parentworking:
167 for lfile in unsure:
167 for lfile in unsure:
168 standin = lfutil.standin(lfile)
168 standin = lfutil.standin(lfile)
169 if standin not in ctx1:
169 if standin not in ctx1:
170 # from second parent
170 # from second parent
171 modified.append(lfile)
171 modified.append(lfile)
172 elif ctx1[standin].data().strip() \
172 elif ctx1[standin].data().strip() \
173 != lfutil.hashfile(self.wjoin(lfile)):
173 != lfutil.hashfile(self.wjoin(lfile)):
174 modified.append(lfile)
174 modified.append(lfile)
175 else:
175 else:
176 if listclean:
176 if listclean:
177 clean.append(lfile)
177 clean.append(lfile)
178 lfdirstate.normal(lfile)
178 lfdirstate.normal(lfile)
179 else:
179 else:
180 tocheck = unsure + modified + added + clean
180 tocheck = unsure + modified + added + clean
181 modified, added, clean = [], [], []
181 modified, added, clean = [], [], []
182
182
183 for lfile in tocheck:
183 for lfile in tocheck:
184 standin = lfutil.standin(lfile)
184 standin = lfutil.standin(lfile)
185 if inctx(standin, ctx1):
185 if inctx(standin, ctx1):
186 if ctx1[standin].data().strip() != \
186 if ctx1[standin].data().strip() != \
187 lfutil.hashfile(self.wjoin(lfile)):
187 lfutil.hashfile(self.wjoin(lfile)):
188 modified.append(lfile)
188 modified.append(lfile)
189 elif listclean:
189 elif listclean:
190 clean.append(lfile)
190 clean.append(lfile)
191 else:
191 else:
192 added.append(lfile)
192 added.append(lfile)
193
193
194 # Standins no longer found in lfdirstate has been
194 # Standins no longer found in lfdirstate has been
195 # removed
195 # removed
196 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
196 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
197 lfile = lfutil.splitstandin(standin)
197 lfile = lfutil.splitstandin(standin)
198 if not match(lfile):
198 if not match(lfile):
199 continue
199 continue
200 if lfile not in lfdirstate:
200 if lfile not in lfdirstate:
201 removed.append(lfile)
201 removed.append(lfile)
202
202
203 # Filter result lists
203 # Filter result lists
204 result = list(result)
204 result = list(result)
205
205
206 # Largefiles are not really removed when they're
206 # Largefiles are not really removed when they're
207 # still in the normal dirstate. Likewise, normal
207 # still in the normal dirstate. Likewise, normal
208 # files are not really removed if they are still in
208 # files are not really removed if they are still in
209 # lfdirstate. This happens in merges where files
209 # lfdirstate. This happens in merges where files
210 # change type.
210 # change type.
211 removed = [f for f in removed
211 removed = [f for f in removed
212 if f not in self.dirstate]
212 if f not in self.dirstate]
213 result[2] = [f for f in result[2]
213 result[2] = [f for f in result[2]
214 if f not in lfdirstate]
214 if f not in lfdirstate]
215
215
216 lfiles = set(lfdirstate._map)
216 lfiles = set(lfdirstate._map)
217 # Unknown files
217 # Unknown files
218 result[4] = set(result[4]).difference(lfiles)
218 result[4] = set(result[4]).difference(lfiles)
219 # Ignored files
219 # Ignored files
220 result[5] = set(result[5]).difference(lfiles)
220 result[5] = set(result[5]).difference(lfiles)
221 # combine normal files and largefiles
221 # combine normal files and largefiles
222 normals = [[fn for fn in filelist
222 normals = [[fn for fn in filelist
223 if not lfutil.isstandin(fn)]
223 if not lfutil.isstandin(fn)]
224 for filelist in result]
224 for filelist in result]
225 lfstatus = (modified, added, removed, missing, [], [],
225 lfstatus = (modified, added, removed, missing, [], [],
226 clean)
226 clean)
227 result = [sorted(list1 + list2)
227 result = [sorted(list1 + list2)
228 for (list1, list2) in zip(normals, lfstatus)]
228 for (list1, list2) in zip(normals, lfstatus)]
229 else:
229 else:
230 def toname(f):
230 def toname(f):
231 if lfutil.isstandin(f):
231 if lfutil.isstandin(f):
232 return lfutil.splitstandin(f)
232 return lfutil.splitstandin(f)
233 return f
233 return f
234 result = [[toname(f) for f in items]
234 result = [[toname(f) for f in items]
235 for items in result]
235 for items in result]
236
236
237 if wlock:
237 if wlock:
238 lfdirstate.write()
238 lfdirstate.write()
239
239
240 finally:
240 finally:
241 if wlock:
241 if wlock:
242 wlock.release()
242 wlock.release()
243
243
244 self.lfstatus = True
244 self.lfstatus = True
245 return result
245 return scmutil.status(*result)
246
246
247 # As part of committing, copy all of the largefiles into the
247 # As part of committing, copy all of the largefiles into the
248 # cache.
248 # cache.
249 def commitctx(self, *args, **kwargs):
249 def commitctx(self, *args, **kwargs):
250 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
250 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
251 lfutil.copyalltostore(self, node)
251 lfutil.copyalltostore(self, node)
252 return node
252 return node
253
253
254 # Before commit, largefile standins have not had their
254 # Before commit, largefile standins have not had their
255 # contents updated to reflect the hash of their largefile.
255 # contents updated to reflect the hash of their largefile.
256 # Do that here.
256 # Do that here.
257 def commit(self, text="", user=None, date=None, match=None,
257 def commit(self, text="", user=None, date=None, match=None,
258 force=False, editor=False, extra={}):
258 force=False, editor=False, extra={}):
259 orig = super(lfilesrepo, self).commit
259 orig = super(lfilesrepo, self).commit
260
260
261 wlock = self.wlock()
261 wlock = self.wlock()
262 try:
262 try:
263 # Case 0: Automated committing
263 # Case 0: Automated committing
264 #
264 #
265 # While automated committing (like rebase, transplant
265 # While automated committing (like rebase, transplant
266 # and so on), this code path is used to avoid:
266 # and so on), this code path is used to avoid:
267 # (1) updating standins, because standins should
267 # (1) updating standins, because standins should
268 # be already updated at this point
268 # be already updated at this point
269 # (2) aborting when stadnins are matched by "match",
269 # (2) aborting when stadnins are matched by "match",
270 # because automated committing may specify them directly
270 # because automated committing may specify them directly
271 #
271 #
272 if getattr(self, "_isrebasing", False) or \
272 if getattr(self, "_isrebasing", False) or \
273 getattr(self, "_istransplanting", False):
273 getattr(self, "_istransplanting", False):
274 result = orig(text=text, user=user, date=date, match=match,
274 result = orig(text=text, user=user, date=date, match=match,
275 force=force, editor=editor, extra=extra)
275 force=force, editor=editor, extra=extra)
276
276
277 if result:
277 if result:
278 lfdirstate = lfutil.openlfdirstate(ui, self)
278 lfdirstate = lfutil.openlfdirstate(ui, self)
279 for f in self[result].files():
279 for f in self[result].files():
280 if lfutil.isstandin(f):
280 if lfutil.isstandin(f):
281 lfile = lfutil.splitstandin(f)
281 lfile = lfutil.splitstandin(f)
282 lfutil.synclfdirstate(self, lfdirstate, lfile,
282 lfutil.synclfdirstate(self, lfdirstate, lfile,
283 False)
283 False)
284 lfdirstate.write()
284 lfdirstate.write()
285
285
286 return result
286 return result
287 # Case 1: user calls commit with no specific files or
287 # Case 1: user calls commit with no specific files or
288 # include/exclude patterns: refresh and commit all files that
288 # include/exclude patterns: refresh and commit all files that
289 # are "dirty".
289 # are "dirty".
290 if ((match is None) or
290 if ((match is None) or
291 (not match.anypats() and not match.files())):
291 (not match.anypats() and not match.files())):
292 # Spend a bit of time here to get a list of files we know
292 # Spend a bit of time here to get a list of files we know
293 # are modified so we can compare only against those.
293 # are modified so we can compare only against those.
294 # It can cost a lot of time (several seconds)
294 # It can cost a lot of time (several seconds)
295 # otherwise to update all standins if the largefiles are
295 # otherwise to update all standins if the largefiles are
296 # large.
296 # large.
297 lfdirstate = lfutil.openlfdirstate(ui, self)
297 lfdirstate = lfutil.openlfdirstate(ui, self)
298 dirtymatch = match_.always(self.root, self.getcwd())
298 dirtymatch = match_.always(self.root, self.getcwd())
299 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
299 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
300 False)
300 False)
301 modified, added, removed = s[:3]
301 modified, added, removed = s[:3]
302 modifiedfiles = unsure + modified + added + removed
302 modifiedfiles = unsure + modified + added + removed
303 lfiles = lfutil.listlfiles(self)
303 lfiles = lfutil.listlfiles(self)
304 # this only loops through largefiles that exist (not
304 # this only loops through largefiles that exist (not
305 # removed/renamed)
305 # removed/renamed)
306 for lfile in lfiles:
306 for lfile in lfiles:
307 if lfile in modifiedfiles:
307 if lfile in modifiedfiles:
308 if os.path.exists(
308 if os.path.exists(
309 self.wjoin(lfutil.standin(lfile))):
309 self.wjoin(lfutil.standin(lfile))):
310 # this handles the case where a rebase is being
310 # this handles the case where a rebase is being
311 # performed and the working copy is not updated
311 # performed and the working copy is not updated
312 # yet.
312 # yet.
313 if os.path.exists(self.wjoin(lfile)):
313 if os.path.exists(self.wjoin(lfile)):
314 lfutil.updatestandin(self,
314 lfutil.updatestandin(self,
315 lfutil.standin(lfile))
315 lfutil.standin(lfile))
316 lfdirstate.normal(lfile)
316 lfdirstate.normal(lfile)
317
317
318 result = orig(text=text, user=user, date=date, match=match,
318 result = orig(text=text, user=user, date=date, match=match,
319 force=force, editor=editor, extra=extra)
319 force=force, editor=editor, extra=extra)
320
320
321 if result is not None:
321 if result is not None:
322 for lfile in lfdirstate:
322 for lfile in lfdirstate:
323 if lfile in modifiedfiles:
323 if lfile in modifiedfiles:
324 if (not os.path.exists(self.wjoin(
324 if (not os.path.exists(self.wjoin(
325 lfutil.standin(lfile)))) or \
325 lfutil.standin(lfile)))) or \
326 (not os.path.exists(self.wjoin(lfile))):
326 (not os.path.exists(self.wjoin(lfile))):
327 lfdirstate.drop(lfile)
327 lfdirstate.drop(lfile)
328
328
329 # This needs to be after commit; otherwise precommit hooks
329 # This needs to be after commit; otherwise precommit hooks
330 # get the wrong status
330 # get the wrong status
331 lfdirstate.write()
331 lfdirstate.write()
332 return result
332 return result
333
333
334 lfiles = lfutil.listlfiles(self)
334 lfiles = lfutil.listlfiles(self)
335 match._files = self._subdirlfs(match.files(), lfiles)
335 match._files = self._subdirlfs(match.files(), lfiles)
336
336
337 # Case 2: user calls commit with specified patterns: refresh
337 # Case 2: user calls commit with specified patterns: refresh
338 # any matching big files.
338 # any matching big files.
339 smatcher = lfutil.composestandinmatcher(self, match)
339 smatcher = lfutil.composestandinmatcher(self, match)
340 standins = self.dirstate.walk(smatcher, [], False, False)
340 standins = self.dirstate.walk(smatcher, [], False, False)
341
341
342 # No matching big files: get out of the way and pass control to
342 # No matching big files: get out of the way and pass control to
343 # the usual commit() method.
343 # the usual commit() method.
344 if not standins:
344 if not standins:
345 return orig(text=text, user=user, date=date, match=match,
345 return orig(text=text, user=user, date=date, match=match,
346 force=force, editor=editor, extra=extra)
346 force=force, editor=editor, extra=extra)
347
347
348 # Refresh all matching big files. It's possible that the
348 # Refresh all matching big files. It's possible that the
349 # commit will end up failing, in which case the big files will
349 # commit will end up failing, in which case the big files will
350 # stay refreshed. No harm done: the user modified them and
350 # stay refreshed. No harm done: the user modified them and
351 # asked to commit them, so sooner or later we're going to
351 # asked to commit them, so sooner or later we're going to
352 # refresh the standins. Might as well leave them refreshed.
352 # refresh the standins. Might as well leave them refreshed.
353 lfdirstate = lfutil.openlfdirstate(ui, self)
353 lfdirstate = lfutil.openlfdirstate(ui, self)
354 for standin in standins:
354 for standin in standins:
355 lfile = lfutil.splitstandin(standin)
355 lfile = lfutil.splitstandin(standin)
356 if lfdirstate[lfile] != 'r':
356 if lfdirstate[lfile] != 'r':
357 lfutil.updatestandin(self, standin)
357 lfutil.updatestandin(self, standin)
358 lfdirstate.normal(lfile)
358 lfdirstate.normal(lfile)
359 else:
359 else:
360 lfdirstate.drop(lfile)
360 lfdirstate.drop(lfile)
361
361
362 # Cook up a new matcher that only matches regular files or
362 # Cook up a new matcher that only matches regular files or
363 # standins corresponding to the big files requested by the
363 # standins corresponding to the big files requested by the
364 # user. Have to modify _files to prevent commit() from
364 # user. Have to modify _files to prevent commit() from
365 # complaining "not tracked" for big files.
365 # complaining "not tracked" for big files.
366 match = copy.copy(match)
366 match = copy.copy(match)
367 origmatchfn = match.matchfn
367 origmatchfn = match.matchfn
368
368
369 # Check both the list of largefiles and the list of
369 # Check both the list of largefiles and the list of
370 # standins because if a largefile was removed, it
370 # standins because if a largefile was removed, it
371 # won't be in the list of largefiles at this point
371 # won't be in the list of largefiles at this point
372 match._files += sorted(standins)
372 match._files += sorted(standins)
373
373
374 actualfiles = []
374 actualfiles = []
375 for f in match._files:
375 for f in match._files:
376 fstandin = lfutil.standin(f)
376 fstandin = lfutil.standin(f)
377
377
378 # ignore known largefiles and standins
378 # ignore known largefiles and standins
379 if f in lfiles or fstandin in standins:
379 if f in lfiles or fstandin in standins:
380 continue
380 continue
381
381
382 actualfiles.append(f)
382 actualfiles.append(f)
383 match._files = actualfiles
383 match._files = actualfiles
384
384
385 def matchfn(f):
385 def matchfn(f):
386 if origmatchfn(f):
386 if origmatchfn(f):
387 return f not in lfiles
387 return f not in lfiles
388 else:
388 else:
389 return f in standins
389 return f in standins
390
390
391 match.matchfn = matchfn
391 match.matchfn = matchfn
392 result = orig(text=text, user=user, date=date, match=match,
392 result = orig(text=text, user=user, date=date, match=match,
393 force=force, editor=editor, extra=extra)
393 force=force, editor=editor, extra=extra)
394 # This needs to be after commit; otherwise precommit hooks
394 # This needs to be after commit; otherwise precommit hooks
395 # get the wrong status
395 # get the wrong status
396 lfdirstate.write()
396 lfdirstate.write()
397 return result
397 return result
398 finally:
398 finally:
399 wlock.release()
399 wlock.release()
400
400
401 def push(self, remote, force=False, revs=None, newbranch=False):
401 def push(self, remote, force=False, revs=None, newbranch=False):
402 if remote.local():
402 if remote.local():
403 missing = set(self.requirements) - remote.local().supported
403 missing = set(self.requirements) - remote.local().supported
404 if missing:
404 if missing:
405 msg = _("required features are not"
405 msg = _("required features are not"
406 " supported in the destination:"
406 " supported in the destination:"
407 " %s") % (', '.join(sorted(missing)))
407 " %s") % (', '.join(sorted(missing)))
408 raise util.Abort(msg)
408 raise util.Abort(msg)
409 return super(lfilesrepo, self).push(remote, force=force, revs=revs,
409 return super(lfilesrepo, self).push(remote, force=force, revs=revs,
410 newbranch=newbranch)
410 newbranch=newbranch)
411
411
412 def _subdirlfs(self, files, lfiles):
412 def _subdirlfs(self, files, lfiles):
413 '''
413 '''
414 Adjust matched file list
414 Adjust matched file list
415 If we pass a directory to commit whose only commitable files
415 If we pass a directory to commit whose only commitable files
416 are largefiles, the core commit code aborts before finding
416 are largefiles, the core commit code aborts before finding
417 the largefiles.
417 the largefiles.
418 So we do the following:
418 So we do the following:
419 For directories that only have largefiles as matches,
419 For directories that only have largefiles as matches,
420 we explicitly add the largefiles to the match list and remove
420 we explicitly add the largefiles to the match list and remove
421 the directory.
421 the directory.
422 In other cases, we leave the match list unmodified.
422 In other cases, we leave the match list unmodified.
423 '''
423 '''
424 actualfiles = []
424 actualfiles = []
425 dirs = []
425 dirs = []
426 regulars = []
426 regulars = []
427
427
428 for f in files:
428 for f in files:
429 if lfutil.isstandin(f + '/'):
429 if lfutil.isstandin(f + '/'):
430 raise util.Abort(
430 raise util.Abort(
431 _('file "%s" is a largefile standin') % f,
431 _('file "%s" is a largefile standin') % f,
432 hint=('commit the largefile itself instead'))
432 hint=('commit the largefile itself instead'))
433 # Scan directories
433 # Scan directories
434 if os.path.isdir(self.wjoin(f)):
434 if os.path.isdir(self.wjoin(f)):
435 dirs.append(f)
435 dirs.append(f)
436 else:
436 else:
437 regulars.append(f)
437 regulars.append(f)
438
438
439 for f in dirs:
439 for f in dirs:
440 matcheddir = False
440 matcheddir = False
441 d = self.dirstate.normalize(f) + '/'
441 d = self.dirstate.normalize(f) + '/'
442 # Check for matched normal files
442 # Check for matched normal files
443 for mf in regulars:
443 for mf in regulars:
444 if self.dirstate.normalize(mf).startswith(d):
444 if self.dirstate.normalize(mf).startswith(d):
445 actualfiles.append(f)
445 actualfiles.append(f)
446 matcheddir = True
446 matcheddir = True
447 break
447 break
448 if not matcheddir:
448 if not matcheddir:
449 # If no normal match, manually append
449 # If no normal match, manually append
450 # any matching largefiles
450 # any matching largefiles
451 for lf in lfiles:
451 for lf in lfiles:
452 if self.dirstate.normalize(lf).startswith(d):
452 if self.dirstate.normalize(lf).startswith(d):
453 actualfiles.append(lf)
453 actualfiles.append(lf)
454 if not matcheddir:
454 if not matcheddir:
455 actualfiles.append(lfutil.standin(f))
455 actualfiles.append(lfutil.standin(f))
456 matcheddir = True
456 matcheddir = True
457 # Nothing in dir, so readd it
457 # Nothing in dir, so readd it
458 # and let commit reject it
458 # and let commit reject it
459 if not matcheddir:
459 if not matcheddir:
460 actualfiles.append(f)
460 actualfiles.append(f)
461
461
462 # Always add normal files
462 # Always add normal files
463 actualfiles += regulars
463 actualfiles += regulars
464 return actualfiles
464 return actualfiles
465
465
466 repo.__class__ = lfilesrepo
466 repo.__class__ = lfilesrepo
467
467
468 def prepushoutgoinghook(local, remote, outgoing):
468 def prepushoutgoinghook(local, remote, outgoing):
469 if outgoing.missing:
469 if outgoing.missing:
470 toupload = set()
470 toupload = set()
471 addfunc = lambda fn, lfhash: toupload.add(lfhash)
471 addfunc = lambda fn, lfhash: toupload.add(lfhash)
472 lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
472 lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
473 lfcommands.uploadlfiles(ui, local, remote, toupload)
473 lfcommands.uploadlfiles(ui, local, remote, toupload)
474 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
474 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
475
475
476 def checkrequireslfiles(ui, repo, **kwargs):
476 def checkrequireslfiles(ui, repo, **kwargs):
477 if 'largefiles' not in repo.requirements and util.any(
477 if 'largefiles' not in repo.requirements and util.any(
478 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
478 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
479 repo.requirements.add('largefiles')
479 repo.requirements.add('largefiles')
480 repo._writerequirements()
480 repo._writerequirements()
481
481
482 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
482 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
483 'largefiles')
483 'largefiles')
484 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
484 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
@@ -1,1710 +1,1710 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class basectx(object):
20 class basectx(object):
21 """A basectx object represents the common logic for its children:
21 """A basectx object represents the common logic for its children:
22 changectx: read-only context that is already present in the repo,
22 changectx: read-only context that is already present in the repo,
23 workingctx: a context that represents the working directory and can
23 workingctx: a context that represents the working directory and can
24 be committed,
24 be committed,
25 memctx: a context that represents changes in-memory and can also
25 memctx: a context that represents changes in-memory and can also
26 be committed."""
26 be committed."""
27 def __new__(cls, repo, changeid='', *args, **kwargs):
27 def __new__(cls, repo, changeid='', *args, **kwargs):
28 if isinstance(changeid, basectx):
28 if isinstance(changeid, basectx):
29 return changeid
29 return changeid
30
30
31 o = super(basectx, cls).__new__(cls)
31 o = super(basectx, cls).__new__(cls)
32
32
33 o._repo = repo
33 o._repo = repo
34 o._rev = nullrev
34 o._rev = nullrev
35 o._node = nullid
35 o._node = nullid
36
36
37 return o
37 return o
38
38
39 def __str__(self):
39 def __str__(self):
40 return short(self.node())
40 return short(self.node())
41
41
42 def __int__(self):
42 def __int__(self):
43 return self.rev()
43 return self.rev()
44
44
45 def __repr__(self):
45 def __repr__(self):
46 return "<%s %s>" % (type(self).__name__, str(self))
46 return "<%s %s>" % (type(self).__name__, str(self))
47
47
48 def __eq__(self, other):
48 def __eq__(self, other):
49 try:
49 try:
50 return type(self) == type(other) and self._rev == other._rev
50 return type(self) == type(other) and self._rev == other._rev
51 except AttributeError:
51 except AttributeError:
52 return False
52 return False
53
53
54 def __ne__(self, other):
54 def __ne__(self, other):
55 return not (self == other)
55 return not (self == other)
56
56
57 def __contains__(self, key):
57 def __contains__(self, key):
58 return key in self._manifest
58 return key in self._manifest
59
59
60 def __getitem__(self, key):
60 def __getitem__(self, key):
61 return self.filectx(key)
61 return self.filectx(key)
62
62
63 def __iter__(self):
63 def __iter__(self):
64 for f in sorted(self._manifest):
64 for f in sorted(self._manifest):
65 yield f
65 yield f
66
66
67 def _manifestmatches(self, match, s):
67 def _manifestmatches(self, match, s):
68 """generate a new manifest filtered by the match argument
68 """generate a new manifest filtered by the match argument
69
69
70 This method is for internal use only and mainly exists to provide an
70 This method is for internal use only and mainly exists to provide an
71 object oriented way for other contexts to customize the manifest
71 object oriented way for other contexts to customize the manifest
72 generation.
72 generation.
73 """
73 """
74 if match.always():
74 if match.always():
75 return self.manifest().copy()
75 return self.manifest().copy()
76
76
77 files = match.files()
77 files = match.files()
78 if (match.matchfn == match.exact or
78 if (match.matchfn == match.exact or
79 (not match.anypats() and util.all(fn in self for fn in files))):
79 (not match.anypats() and util.all(fn in self for fn in files))):
80 return self.manifest().intersectfiles(files)
80 return self.manifest().intersectfiles(files)
81
81
82 mf = self.manifest().copy()
82 mf = self.manifest().copy()
83 for fn in mf.keys():
83 for fn in mf.keys():
84 if not match(fn):
84 if not match(fn):
85 del mf[fn]
85 del mf[fn]
86 return mf
86 return mf
87
87
88 def _matchstatus(self, other, s, match, listignored, listclean,
88 def _matchstatus(self, other, s, match, listignored, listclean,
89 listunknown):
89 listunknown):
90 """return match.always if match is none
90 """return match.always if match is none
91
91
92 This internal method provides a way for child objects to override the
92 This internal method provides a way for child objects to override the
93 match operator.
93 match operator.
94 """
94 """
95 return match or matchmod.always(self._repo.root, self._repo.getcwd())
95 return match or matchmod.always(self._repo.root, self._repo.getcwd())
96
96
97 def _prestatus(self, other, s, match, listignored, listclean, listunknown):
97 def _prestatus(self, other, s, match, listignored, listclean, listunknown):
98 """provide a hook to allow child objects to preprocess status results
98 """provide a hook to allow child objects to preprocess status results
99
99
100 For example, this allows other contexts, such as workingctx, to query
100 For example, this allows other contexts, such as workingctx, to query
101 the dirstate before comparing the manifests.
101 the dirstate before comparing the manifests.
102 """
102 """
103 # load earliest manifest first for caching reasons
103 # load earliest manifest first for caching reasons
104 if self.rev() < other.rev():
104 if self.rev() < other.rev():
105 self.manifest()
105 self.manifest()
106 return s
106 return s
107
107
108 def _poststatus(self, other, s, match, listignored, listclean, listunknown):
108 def _poststatus(self, other, s, match, listignored, listclean, listunknown):
109 """provide a hook to allow child objects to postprocess status results
109 """provide a hook to allow child objects to postprocess status results
110
110
111 For example, this allows other contexts, such as workingctx, to filter
111 For example, this allows other contexts, such as workingctx, to filter
112 suspect symlinks in the case of FAT32 and NTFS filesytems.
112 suspect symlinks in the case of FAT32 and NTFS filesytems.
113 """
113 """
114 return s
114 return s
115
115
116 def _buildstatus(self, other, s, match, listignored, listclean,
116 def _buildstatus(self, other, s, match, listignored, listclean,
117 listunknown):
117 listunknown):
118 """build a status with respect to another context"""
118 """build a status with respect to another context"""
119 mf1 = other._manifestmatches(match, s)
119 mf1 = other._manifestmatches(match, s)
120 mf2 = self._manifestmatches(match, s)
120 mf2 = self._manifestmatches(match, s)
121
121
122 modified, added, clean = [], [], []
122 modified, added, clean = [], [], []
123 deleted, unknown, ignored = s[3], s[4], s[5]
123 deleted, unknown, ignored = s[3], s[4], s[5]
124 withflags = mf1.withflags() | mf2.withflags()
124 withflags = mf1.withflags() | mf2.withflags()
125 for fn, mf2node in mf2.iteritems():
125 for fn, mf2node in mf2.iteritems():
126 if fn in mf1:
126 if fn in mf1:
127 if (fn not in deleted and
127 if (fn not in deleted and
128 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
128 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
129 (mf1[fn] != mf2node and
129 (mf1[fn] != mf2node and
130 (mf2node or self[fn].cmp(other[fn]))))):
130 (mf2node or self[fn].cmp(other[fn]))))):
131 modified.append(fn)
131 modified.append(fn)
132 elif listclean:
132 elif listclean:
133 clean.append(fn)
133 clean.append(fn)
134 del mf1[fn]
134 del mf1[fn]
135 elif fn not in deleted:
135 elif fn not in deleted:
136 added.append(fn)
136 added.append(fn)
137 removed = mf1.keys()
137 removed = mf1.keys()
138 if removed:
138 if removed:
139 # need to filter files if they are already reported as removed
139 # need to filter files if they are already reported as removed
140 unknown = [fn for fn in unknown if fn not in mf1]
140 unknown = [fn for fn in unknown if fn not in mf1]
141 ignored = [fn for fn in ignored if fn not in mf1]
141 ignored = [fn for fn in ignored if fn not in mf1]
142
142
143 return [modified, added, removed, deleted, unknown, ignored, clean]
143 return [modified, added, removed, deleted, unknown, ignored, clean]
144
144
145 @propertycache
145 @propertycache
146 def substate(self):
146 def substate(self):
147 return subrepo.state(self, self._repo.ui)
147 return subrepo.state(self, self._repo.ui)
148
148
149 def subrev(self, subpath):
149 def subrev(self, subpath):
150 return self.substate[subpath][1]
150 return self.substate[subpath][1]
151
151
152 def rev(self):
152 def rev(self):
153 return self._rev
153 return self._rev
154 def node(self):
154 def node(self):
155 return self._node
155 return self._node
156 def hex(self):
156 def hex(self):
157 return hex(self.node())
157 return hex(self.node())
158 def manifest(self):
158 def manifest(self):
159 return self._manifest
159 return self._manifest
160 def phasestr(self):
160 def phasestr(self):
161 return phases.phasenames[self.phase()]
161 return phases.phasenames[self.phase()]
162 def mutable(self):
162 def mutable(self):
163 return self.phase() > phases.public
163 return self.phase() > phases.public
164
164
165 def getfileset(self, expr):
165 def getfileset(self, expr):
166 return fileset.getfileset(self, expr)
166 return fileset.getfileset(self, expr)
167
167
168 def obsolete(self):
168 def obsolete(self):
169 """True if the changeset is obsolete"""
169 """True if the changeset is obsolete"""
170 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
170 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
171
171
172 def extinct(self):
172 def extinct(self):
173 """True if the changeset is extinct"""
173 """True if the changeset is extinct"""
174 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
174 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
175
175
176 def unstable(self):
176 def unstable(self):
177 """True if the changeset is not obsolete but it's ancestor are"""
177 """True if the changeset is not obsolete but it's ancestor are"""
178 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
178 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
179
179
180 def bumped(self):
180 def bumped(self):
181 """True if the changeset try to be a successor of a public changeset
181 """True if the changeset try to be a successor of a public changeset
182
182
183 Only non-public and non-obsolete changesets may be bumped.
183 Only non-public and non-obsolete changesets may be bumped.
184 """
184 """
185 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
185 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
186
186
187 def divergent(self):
187 def divergent(self):
188 """Is a successors of a changeset with multiple possible successors set
188 """Is a successors of a changeset with multiple possible successors set
189
189
190 Only non-public and non-obsolete changesets may be divergent.
190 Only non-public and non-obsolete changesets may be divergent.
191 """
191 """
192 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
192 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
193
193
194 def troubled(self):
194 def troubled(self):
195 """True if the changeset is either unstable, bumped or divergent"""
195 """True if the changeset is either unstable, bumped or divergent"""
196 return self.unstable() or self.bumped() or self.divergent()
196 return self.unstable() or self.bumped() or self.divergent()
197
197
198 def troubles(self):
198 def troubles(self):
199 """return the list of troubles affecting this changesets.
199 """return the list of troubles affecting this changesets.
200
200
201 Troubles are returned as strings. possible values are:
201 Troubles are returned as strings. possible values are:
202 - unstable,
202 - unstable,
203 - bumped,
203 - bumped,
204 - divergent.
204 - divergent.
205 """
205 """
206 troubles = []
206 troubles = []
207 if self.unstable():
207 if self.unstable():
208 troubles.append('unstable')
208 troubles.append('unstable')
209 if self.bumped():
209 if self.bumped():
210 troubles.append('bumped')
210 troubles.append('bumped')
211 if self.divergent():
211 if self.divergent():
212 troubles.append('divergent')
212 troubles.append('divergent')
213 return troubles
213 return troubles
214
214
215 def parents(self):
215 def parents(self):
216 """return contexts for each parent changeset"""
216 """return contexts for each parent changeset"""
217 return self._parents
217 return self._parents
218
218
219 def p1(self):
219 def p1(self):
220 return self._parents[0]
220 return self._parents[0]
221
221
222 def p2(self):
222 def p2(self):
223 if len(self._parents) == 2:
223 if len(self._parents) == 2:
224 return self._parents[1]
224 return self._parents[1]
225 return changectx(self._repo, -1)
225 return changectx(self._repo, -1)
226
226
227 def _fileinfo(self, path):
227 def _fileinfo(self, path):
228 if '_manifest' in self.__dict__:
228 if '_manifest' in self.__dict__:
229 try:
229 try:
230 return self._manifest[path], self._manifest.flags(path)
230 return self._manifest[path], self._manifest.flags(path)
231 except KeyError:
231 except KeyError:
232 raise error.ManifestLookupError(self._node, path,
232 raise error.ManifestLookupError(self._node, path,
233 _('not found in manifest'))
233 _('not found in manifest'))
234 if '_manifestdelta' in self.__dict__ or path in self.files():
234 if '_manifestdelta' in self.__dict__ or path in self.files():
235 if path in self._manifestdelta:
235 if path in self._manifestdelta:
236 return (self._manifestdelta[path],
236 return (self._manifestdelta[path],
237 self._manifestdelta.flags(path))
237 self._manifestdelta.flags(path))
238 node, flag = self._repo.manifest.find(self._changeset[0], path)
238 node, flag = self._repo.manifest.find(self._changeset[0], path)
239 if not node:
239 if not node:
240 raise error.ManifestLookupError(self._node, path,
240 raise error.ManifestLookupError(self._node, path,
241 _('not found in manifest'))
241 _('not found in manifest'))
242
242
243 return node, flag
243 return node, flag
244
244
245 def filenode(self, path):
245 def filenode(self, path):
246 return self._fileinfo(path)[0]
246 return self._fileinfo(path)[0]
247
247
248 def flags(self, path):
248 def flags(self, path):
249 try:
249 try:
250 return self._fileinfo(path)[1]
250 return self._fileinfo(path)[1]
251 except error.LookupError:
251 except error.LookupError:
252 return ''
252 return ''
253
253
254 def sub(self, path):
254 def sub(self, path):
255 return subrepo.subrepo(self, path)
255 return subrepo.subrepo(self, path)
256
256
257 def match(self, pats=[], include=None, exclude=None, default='glob'):
257 def match(self, pats=[], include=None, exclude=None, default='glob'):
258 r = self._repo
258 r = self._repo
259 return matchmod.match(r.root, r.getcwd(), pats,
259 return matchmod.match(r.root, r.getcwd(), pats,
260 include, exclude, default,
260 include, exclude, default,
261 auditor=r.auditor, ctx=self)
261 auditor=r.auditor, ctx=self)
262
262
263 def diff(self, ctx2=None, match=None, **opts):
263 def diff(self, ctx2=None, match=None, **opts):
264 """Returns a diff generator for the given contexts and matcher"""
264 """Returns a diff generator for the given contexts and matcher"""
265 if ctx2 is None:
265 if ctx2 is None:
266 ctx2 = self.p1()
266 ctx2 = self.p1()
267 if ctx2 is not None:
267 if ctx2 is not None:
268 ctx2 = self._repo[ctx2]
268 ctx2 = self._repo[ctx2]
269 diffopts = patch.diffopts(self._repo.ui, opts)
269 diffopts = patch.diffopts(self._repo.ui, opts)
270 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
270 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
271
271
272 @propertycache
272 @propertycache
273 def _dirs(self):
273 def _dirs(self):
274 return scmutil.dirs(self._manifest)
274 return scmutil.dirs(self._manifest)
275
275
276 def dirs(self):
276 def dirs(self):
277 return self._dirs
277 return self._dirs
278
278
279 def dirty(self, missing=False, merge=True, branch=True):
279 def dirty(self, missing=False, merge=True, branch=True):
280 return False
280 return False
281
281
282 def status(self, other=None, match=None, listignored=False,
282 def status(self, other=None, match=None, listignored=False,
283 listclean=False, listunknown=False, listsubrepos=False):
283 listclean=False, listunknown=False, listsubrepos=False):
284 """return status of files between two nodes or node and working
284 """return status of files between two nodes or node and working
285 directory.
285 directory.
286
286
287 If other is None, compare this node with working directory.
287 If other is None, compare this node with working directory.
288
288
289 returns (modified, added, removed, deleted, unknown, ignored, clean)
289 returns (modified, added, removed, deleted, unknown, ignored, clean)
290 """
290 """
291
291
292 ctx1 = self
292 ctx1 = self
293 ctx2 = self._repo[other]
293 ctx2 = self._repo[other]
294
294
295 # This next code block is, admittedly, fragile logic that tests for
295 # This next code block is, admittedly, fragile logic that tests for
296 # reversing the contexts and wouldn't need to exist if it weren't for
296 # reversing the contexts and wouldn't need to exist if it weren't for
297 # the fast (and common) code path of comparing the working directory
297 # the fast (and common) code path of comparing the working directory
298 # with its first parent.
298 # with its first parent.
299 #
299 #
300 # What we're aiming for here is the ability to call:
300 # What we're aiming for here is the ability to call:
301 #
301 #
302 # workingctx.status(parentctx)
302 # workingctx.status(parentctx)
303 #
303 #
304 # If we always built the manifest for each context and compared those,
304 # If we always built the manifest for each context and compared those,
305 # then we'd be done. But the special case of the above call means we
305 # then we'd be done. But the special case of the above call means we
306 # just copy the manifest of the parent.
306 # just copy the manifest of the parent.
307 reversed = False
307 reversed = False
308 if (not isinstance(ctx1, changectx)
308 if (not isinstance(ctx1, changectx)
309 and isinstance(ctx2, changectx)):
309 and isinstance(ctx2, changectx)):
310 reversed = True
310 reversed = True
311 ctx1, ctx2 = ctx2, ctx1
311 ctx1, ctx2 = ctx2, ctx1
312
312
313 r = [[], [], [], [], [], [], []]
313 r = [[], [], [], [], [], [], []]
314 match = ctx2._matchstatus(ctx1, r, match, listignored, listclean,
314 match = ctx2._matchstatus(ctx1, r, match, listignored, listclean,
315 listunknown)
315 listunknown)
316 r = ctx2._prestatus(ctx1, r, match, listignored, listclean, listunknown)
316 r = ctx2._prestatus(ctx1, r, match, listignored, listclean, listunknown)
317 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
317 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
318 listunknown)
318 listunknown)
319 r = ctx2._poststatus(ctx1, r, match, listignored, listclean,
319 r = ctx2._poststatus(ctx1, r, match, listignored, listclean,
320 listunknown)
320 listunknown)
321
321
322 if reversed:
322 if reversed:
323 # reverse added and removed
323 # reverse added and removed
324 r[1], r[2] = r[2], r[1]
324 r[1], r[2] = r[2], r[1]
325
325
326 if listsubrepos:
326 if listsubrepos:
327 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
327 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
328 rev2 = ctx2.subrev(subpath)
328 rev2 = ctx2.subrev(subpath)
329 try:
329 try:
330 submatch = matchmod.narrowmatcher(subpath, match)
330 submatch = matchmod.narrowmatcher(subpath, match)
331 s = sub.status(rev2, match=submatch, ignored=listignored,
331 s = sub.status(rev2, match=submatch, ignored=listignored,
332 clean=listclean, unknown=listunknown,
332 clean=listclean, unknown=listunknown,
333 listsubrepos=True)
333 listsubrepos=True)
334 for rfiles, sfiles in zip(r, s):
334 for rfiles, sfiles in zip(r, s):
335 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
335 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
336 except error.LookupError:
336 except error.LookupError:
337 self._repo.ui.status(_("skipping missing "
337 self._repo.ui.status(_("skipping missing "
338 "subrepository: %s\n") % subpath)
338 "subrepository: %s\n") % subpath)
339
339
340 for l in r:
340 for l in r:
341 l.sort()
341 l.sort()
342
342
343 # we return a tuple to signify that this list isn't changing
343 # we return a tuple to signify that this list isn't changing
344 return tuple(r)
344 return scmutil.status(*r)
345
345
346
346
347 def makememctx(repo, parents, text, user, date, branch, files, store,
347 def makememctx(repo, parents, text, user, date, branch, files, store,
348 editor=None):
348 editor=None):
349 def getfilectx(repo, memctx, path):
349 def getfilectx(repo, memctx, path):
350 data, mode, copied = store.getfile(path)
350 data, mode, copied = store.getfile(path)
351 if data is None:
351 if data is None:
352 return None
352 return None
353 islink, isexec = mode
353 islink, isexec = mode
354 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
354 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
355 copied=copied, memctx=memctx)
355 copied=copied, memctx=memctx)
356 extra = {}
356 extra = {}
357 if branch:
357 if branch:
358 extra['branch'] = encoding.fromlocal(branch)
358 extra['branch'] = encoding.fromlocal(branch)
359 ctx = memctx(repo, parents, text, files, getfilectx, user,
359 ctx = memctx(repo, parents, text, files, getfilectx, user,
360 date, extra, editor)
360 date, extra, editor)
361 return ctx
361 return ctx
362
362
363 class changectx(basectx):
363 class changectx(basectx):
364 """A changecontext object makes access to data related to a particular
364 """A changecontext object makes access to data related to a particular
365 changeset convenient. It represents a read-only context already present in
365 changeset convenient. It represents a read-only context already present in
366 the repo."""
366 the repo."""
367 def __init__(self, repo, changeid=''):
367 def __init__(self, repo, changeid=''):
368 """changeid is a revision number, node, or tag"""
368 """changeid is a revision number, node, or tag"""
369
369
370 # since basectx.__new__ already took care of copying the object, we
370 # since basectx.__new__ already took care of copying the object, we
371 # don't need to do anything in __init__, so we just exit here
371 # don't need to do anything in __init__, so we just exit here
372 if isinstance(changeid, basectx):
372 if isinstance(changeid, basectx):
373 return
373 return
374
374
375 if changeid == '':
375 if changeid == '':
376 changeid = '.'
376 changeid = '.'
377 self._repo = repo
377 self._repo = repo
378
378
379 if isinstance(changeid, int):
379 if isinstance(changeid, int):
380 try:
380 try:
381 self._node = repo.changelog.node(changeid)
381 self._node = repo.changelog.node(changeid)
382 except IndexError:
382 except IndexError:
383 raise error.RepoLookupError(
383 raise error.RepoLookupError(
384 _("unknown revision '%s'") % changeid)
384 _("unknown revision '%s'") % changeid)
385 self._rev = changeid
385 self._rev = changeid
386 return
386 return
387 if isinstance(changeid, long):
387 if isinstance(changeid, long):
388 changeid = str(changeid)
388 changeid = str(changeid)
389 if changeid == '.':
389 if changeid == '.':
390 self._node = repo.dirstate.p1()
390 self._node = repo.dirstate.p1()
391 self._rev = repo.changelog.rev(self._node)
391 self._rev = repo.changelog.rev(self._node)
392 return
392 return
393 if changeid == 'null':
393 if changeid == 'null':
394 self._node = nullid
394 self._node = nullid
395 self._rev = nullrev
395 self._rev = nullrev
396 return
396 return
397 if changeid == 'tip':
397 if changeid == 'tip':
398 self._node = repo.changelog.tip()
398 self._node = repo.changelog.tip()
399 self._rev = repo.changelog.rev(self._node)
399 self._rev = repo.changelog.rev(self._node)
400 return
400 return
401 if len(changeid) == 20:
401 if len(changeid) == 20:
402 try:
402 try:
403 self._node = changeid
403 self._node = changeid
404 self._rev = repo.changelog.rev(changeid)
404 self._rev = repo.changelog.rev(changeid)
405 return
405 return
406 except LookupError:
406 except LookupError:
407 pass
407 pass
408
408
409 try:
409 try:
410 r = int(changeid)
410 r = int(changeid)
411 if str(r) != changeid:
411 if str(r) != changeid:
412 raise ValueError
412 raise ValueError
413 l = len(repo.changelog)
413 l = len(repo.changelog)
414 if r < 0:
414 if r < 0:
415 r += l
415 r += l
416 if r < 0 or r >= l:
416 if r < 0 or r >= l:
417 raise ValueError
417 raise ValueError
418 self._rev = r
418 self._rev = r
419 self._node = repo.changelog.node(r)
419 self._node = repo.changelog.node(r)
420 return
420 return
421 except (ValueError, OverflowError, IndexError):
421 except (ValueError, OverflowError, IndexError):
422 pass
422 pass
423
423
424 if len(changeid) == 40:
424 if len(changeid) == 40:
425 try:
425 try:
426 self._node = bin(changeid)
426 self._node = bin(changeid)
427 self._rev = repo.changelog.rev(self._node)
427 self._rev = repo.changelog.rev(self._node)
428 return
428 return
429 except (TypeError, LookupError):
429 except (TypeError, LookupError):
430 pass
430 pass
431
431
432 if changeid in repo._bookmarks:
432 if changeid in repo._bookmarks:
433 self._node = repo._bookmarks[changeid]
433 self._node = repo._bookmarks[changeid]
434 self._rev = repo.changelog.rev(self._node)
434 self._rev = repo.changelog.rev(self._node)
435 return
435 return
436 if changeid in repo._tagscache.tags:
436 if changeid in repo._tagscache.tags:
437 self._node = repo._tagscache.tags[changeid]
437 self._node = repo._tagscache.tags[changeid]
438 self._rev = repo.changelog.rev(self._node)
438 self._rev = repo.changelog.rev(self._node)
439 return
439 return
440 try:
440 try:
441 self._node = repo.branchtip(changeid)
441 self._node = repo.branchtip(changeid)
442 self._rev = repo.changelog.rev(self._node)
442 self._rev = repo.changelog.rev(self._node)
443 return
443 return
444 except error.RepoLookupError:
444 except error.RepoLookupError:
445 pass
445 pass
446
446
447 self._node = repo.changelog._partialmatch(changeid)
447 self._node = repo.changelog._partialmatch(changeid)
448 if self._node is not None:
448 if self._node is not None:
449 self._rev = repo.changelog.rev(self._node)
449 self._rev = repo.changelog.rev(self._node)
450 return
450 return
451
451
452 # lookup failed
452 # lookup failed
453 # check if it might have come from damaged dirstate
453 # check if it might have come from damaged dirstate
454 #
454 #
455 # XXX we could avoid the unfiltered if we had a recognizable exception
455 # XXX we could avoid the unfiltered if we had a recognizable exception
456 # for filtered changeset access
456 # for filtered changeset access
457 if changeid in repo.unfiltered().dirstate.parents():
457 if changeid in repo.unfiltered().dirstate.parents():
458 raise error.Abort(_("working directory has unknown parent '%s'!")
458 raise error.Abort(_("working directory has unknown parent '%s'!")
459 % short(changeid))
459 % short(changeid))
460 try:
460 try:
461 if len(changeid) == 20:
461 if len(changeid) == 20:
462 changeid = hex(changeid)
462 changeid = hex(changeid)
463 except TypeError:
463 except TypeError:
464 pass
464 pass
465 raise error.RepoLookupError(
465 raise error.RepoLookupError(
466 _("unknown revision '%s'") % changeid)
466 _("unknown revision '%s'") % changeid)
467
467
468 def __hash__(self):
468 def __hash__(self):
469 try:
469 try:
470 return hash(self._rev)
470 return hash(self._rev)
471 except AttributeError:
471 except AttributeError:
472 return id(self)
472 return id(self)
473
473
474 def __nonzero__(self):
474 def __nonzero__(self):
475 return self._rev != nullrev
475 return self._rev != nullrev
476
476
477 @propertycache
477 @propertycache
478 def _changeset(self):
478 def _changeset(self):
479 return self._repo.changelog.read(self.rev())
479 return self._repo.changelog.read(self.rev())
480
480
481 @propertycache
481 @propertycache
482 def _manifest(self):
482 def _manifest(self):
483 return self._repo.manifest.read(self._changeset[0])
483 return self._repo.manifest.read(self._changeset[0])
484
484
485 @propertycache
485 @propertycache
486 def _manifestdelta(self):
486 def _manifestdelta(self):
487 return self._repo.manifest.readdelta(self._changeset[0])
487 return self._repo.manifest.readdelta(self._changeset[0])
488
488
489 @propertycache
489 @propertycache
490 def _parents(self):
490 def _parents(self):
491 p = self._repo.changelog.parentrevs(self._rev)
491 p = self._repo.changelog.parentrevs(self._rev)
492 if p[1] == nullrev:
492 if p[1] == nullrev:
493 p = p[:-1]
493 p = p[:-1]
494 return [changectx(self._repo, x) for x in p]
494 return [changectx(self._repo, x) for x in p]
495
495
496 def changeset(self):
496 def changeset(self):
497 return self._changeset
497 return self._changeset
498 def manifestnode(self):
498 def manifestnode(self):
499 return self._changeset[0]
499 return self._changeset[0]
500
500
501 def user(self):
501 def user(self):
502 return self._changeset[1]
502 return self._changeset[1]
503 def date(self):
503 def date(self):
504 return self._changeset[2]
504 return self._changeset[2]
505 def files(self):
505 def files(self):
506 return self._changeset[3]
506 return self._changeset[3]
507 def description(self):
507 def description(self):
508 return self._changeset[4]
508 return self._changeset[4]
509 def branch(self):
509 def branch(self):
510 return encoding.tolocal(self._changeset[5].get("branch"))
510 return encoding.tolocal(self._changeset[5].get("branch"))
511 def closesbranch(self):
511 def closesbranch(self):
512 return 'close' in self._changeset[5]
512 return 'close' in self._changeset[5]
513 def extra(self):
513 def extra(self):
514 return self._changeset[5]
514 return self._changeset[5]
515 def tags(self):
515 def tags(self):
516 return self._repo.nodetags(self._node)
516 return self._repo.nodetags(self._node)
517 def bookmarks(self):
517 def bookmarks(self):
518 return self._repo.nodebookmarks(self._node)
518 return self._repo.nodebookmarks(self._node)
519 def phase(self):
519 def phase(self):
520 return self._repo._phasecache.phase(self._repo, self._rev)
520 return self._repo._phasecache.phase(self._repo, self._rev)
521 def hidden(self):
521 def hidden(self):
522 return self._rev in repoview.filterrevs(self._repo, 'visible')
522 return self._rev in repoview.filterrevs(self._repo, 'visible')
523
523
524 def children(self):
524 def children(self):
525 """return contexts for each child changeset"""
525 """return contexts for each child changeset"""
526 c = self._repo.changelog.children(self._node)
526 c = self._repo.changelog.children(self._node)
527 return [changectx(self._repo, x) for x in c]
527 return [changectx(self._repo, x) for x in c]
528
528
529 def ancestors(self):
529 def ancestors(self):
530 for a in self._repo.changelog.ancestors([self._rev]):
530 for a in self._repo.changelog.ancestors([self._rev]):
531 yield changectx(self._repo, a)
531 yield changectx(self._repo, a)
532
532
def descendants(self):
    """Lazily yield a changectx for every descendant of this revision."""
    for rev in self._repo.changelog.descendants([self._rev]):
        yield changectx(self._repo, rev)
536
536
def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset

    When fileid is omitted, it is resolved from this changeset's
    manifest via filenode()."""
    if fileid is None:
        fileid = self.filenode(path)
    return filectx(self._repo, path, fileid=fileid,
                   changectx=self, filelog=filelog)
543
543
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor."""
    # deal with workingctxs: a working context has no node of its own,
    # so stand in its first parent
    node2 = c2._node
    if node2 is None:
        node2 = c2._parents[0]._node
    candidates = self._repo.changelog.commonancestorsheads(self._node,
                                                           node2)
    if not candidates:
        ancnode = nullid
    elif len(candidates) == 1:
        ancnode = candidates[0]
    else:
        # several candidate heads: let merge.preferancestor choose;
        # fall back to the plain revlog ancestor otherwise
        for rev in self._repo.ui.configlist('merge', 'preferancestor'):
            try:
                prefctx = changectx(self._repo, rev)
            except error.RepoLookupError:
                continue
            ancnode = prefctx.node()
            if ancnode in candidates:
                break
        else:
            ancnode = self._repo.changelog.ancestor(self._node, node2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(ancnode), short(self._node), short(node2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(candidates)
                            if n != ancnode))
    return changectx(self._repo, ancnode)
578
578
def descendant(self, other):
    """True if other is descendant of this changeset"""
    return self._repo.changelog.descendant(self._rev, other._rev)
582
582
def walk(self, match):
    """Yield the names of files in this changeset matched by `match`.

    Files explicitly named by the matcher but absent from this
    changeset (and not directories) are reported via match.bad().
    """
    fset = set(match.files())
    # for dirstate.walk, files=['.'] means "walk the whole tree".
    # follow that here, too
    fset.discard('.')

    # avoid the entire walk if we're only looking for specific files
    if fset and not match.anypats():
        if all(fn in self for fn in fset):
            for fn in sorted(fset):
                if match(fn):
                    yield fn
            # Finish the generator with 'return' rather than raising
            # StopIteration: PEP 479 turns a StopIteration escaping a
            # generator body into RuntimeError on Python 3.7+.  The
            # builtin all() with a generator expression also avoids
            # materializing an intermediate list.
            return

    for fn in self:
        if fn in fset:
            # specified pattern is the exact name
            fset.remove(fn)
        if match(fn):
            yield fn
    for fn in sorted(fset):
        if fn in self._dirs:
            # specified pattern is a directory
            continue
        match.bad(fn, _('no such file in rev %s') % self)
608
608
def matches(self, match):
    """Return an iterator over matching file names (delegates to walk)."""
    return self.walk(match)
611
611
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""

    def __new__(cls, repo, path, *args, **kwargs):
        # Hook point: subclasses may specialize construction.
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        # the revlog tracking this file's history
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            # fall back to the linkrev recorded in the filelog
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # unresolved filenode: fall back to identity
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        """Return this file revision's number within its filelog."""
        return self._filerev
    def filenode(self):
        """Return this file revision's node id."""
        return self._filenode
    def flags(self):
        """Return the file's flags ('x', 'l' or '') in its changeset."""
        return self._changectx.flags(self._path)
    def filelog(self):
        """Return the filelog backing this context."""
        return self._filelog
    def rev(self):
        """Return the changeset revision this context belongs to."""
        return self._changeid
    def linkrev(self):
        """Return the changelog revision recorded for this file revision."""
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        """Return the changeset context this file context belongs to."""
        return self._changectx

    def path(self):
        """Return the repository-relative path of this file."""
        return self._path

    def isbinary(self):
        """True if the file content looks binary; False on read errors."""
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def parents(self):
        """Return filectx objects for this file revision's parents.

        For a renamed file, the first parent is replaced by the rename
        source.  Null parents are dropped."""
        path = self._path
        flog = self._filelog
        parentlist = [(path, n, flog)
                      for n in flog.parents(self._filenode)]

        rename = flog.renamed(self._filenode)
        if rename:
            # (source path, source filenode); filelog resolved lazily
            parentlist[0] = (rename[0], rename[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in parentlist if n != nullid]

    def p1(self):
        """Return the first parent file context."""
        return self.parents()[0]

    def p2(self):
        """Return the second parent file context (null context if none)."""
        parents = self.parents()
        if len(parents) == 2:
            return parents[1]
        return filectx(self._repo, self._path, fileid=-1,
                       filelog=self._filelog)

    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        # decorate() pairs each line of `text` with its annotation datum
        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            # propagate parent annotations into the child's unchanged lines
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines
                # ('~') belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if '_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filenode())
        else:
            base = self

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    # drop parent history once no pending child needs it
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))

    def ancestors(self, followfirst=False):
        """Yield ancestor file contexts, newest (rev, node) first.

        With followfirst=True only first parents are traversed."""
        visit = {}
        c = self
        cut = followfirst and 1 or None
        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.rev(), parent.node())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c
876
876
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only seed the caches we were actually given
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.RepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for filtered revision. In such case we
            # fallback to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either and
            # "incorrect behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that
            # are complicated to solve. Proper handling of the issue here
            # should be considered when solving linkrev issue are on the
            # table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def data(self):
        """Return the file revision's content."""
        return self._filelog.read(self._filenode)

    def size(self):
        """Return the file revision's length in bytes."""
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # the same file revision already existed in a parent:
                # not an actual rename in this changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        childnodes = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in childnodes]
967
967
class committablectx(basectx):
    """A committablectx object provides common functionality for a context
    that wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        """Initialize a committable context.

        text is the commit message; user/date/changes, when given,
        pre-populate the corresponding property caches.  extra is
        copied and always carries a 'branch' key."""
        self._repo = repo
        # a committable context has no revision/node of its own yet
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            # default the branch to the dirstate's current branch
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'
995
995
def __str__(self):
    # render as the first parent plus a '+' marker (uncommitted state)
    return str(self._parents[0]) + "+"

def __nonzero__(self):
    # a committable context always exists
    return True
1001
1001
def _buildflagfunc(self):
    # Create a fallback function for getting file flags when the
    # filesystem doesn't support them

    copiesget = self._repo.dirstate.copies().get

    if len(self._parents) < 2:
        # single parent: flags come straight from its manifest,
        # following any dirstate copy/rename record
        man = self._parents[0].manifest()
        def func(f):
            f = copiesget(f, f)
            return man.flags(f)
    else:
        # merges are tricky: we try to reconstruct the unstored
        # result from the merge (issue1802)
        p1, p2 = self._parents
        pa = p1.ancestor(p2)
        m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

        def func(f):
            f = copiesget(f, f)  # may be wrong for merges with copies
            fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
            if fl1 == fl2:
                return fl1
            if fl1 == fla:
                # only p2 changed the flag: keep its value
                return fl2
            if fl2 == fla:
                # only p1 changed the flag: keep its value
                return fl1
            return ''  # punt for conflicts

    return func
1033
1033
@propertycache
def _flagfunc(self):
    # prefer filesystem-provided flags; fall back to manifest-derived ones
    return self._repo.dirstate.flagfunc(self._buildflagfunc)
1037
1037
@propertycache
def _manifest(self):
    """generate a manifest corresponding to the values in self._status"""

    man = self._parents[0].manifest().copy()
    if len(self._parents) > 1:
        man2 = self.p2().manifest()
        def getman(f):
            # prefer the first parent's entry; fall back to the second
            if f in man:
                return man
            return man2
    else:
        getman = lambda f: man

    copied = self._repo.dirstate.copies()
    ff = self._flagfunc
    modified, added, removed, deleted = self._status[:4]
    for marker, filelist in (("a", added), ("m", modified)):
        for f in filelist:
            orig = copied.get(f, f)
            # tag the node with 'a'/'m' to mark it as uncommitted
            man[f] = getman(orig).get(orig, nullid) + marker
            try:
                man.set(f, ff(f))
            except OSError:
                pass

    # drop files that are gone from the working directory
    for f in deleted + removed:
        if f in man:
            del man[f]

    return man
1069
1069
@propertycache
def _status(self):
    # default to the full working-directory status
    return self._repo.status()

@propertycache
def _user(self):
    # default to the configured username
    return self._repo.ui.username()

@propertycache
def _date(self):
    # default to the current time
    return util.makedate()

def subrev(self, subpath):
    """Uncommitted contexts have no recorded subrepo revision."""
    return None
1084
1084
1085 def user(self):
1085 def user(self):
1086 return self._user or self._repo.ui.username()
1086 return self._user or self._repo.ui.username()
1087 def date(self):
1087 def date(self):
1088 return self._date
1088 return self._date
1089 def description(self):
1089 def description(self):
1090 return self._text
1090 return self._text
1091 def files(self):
1091 def files(self):
1092 return sorted(self._status[0] + self._status[1] + self._status[2])
1092 return sorted(self._status[0] + self._status[1] + self._status[2])
1093
1093
def modified(self):
    """Files changed relative to the parent."""
    return self._status[0]

def added(self):
    """Files newly tracked."""
    return self._status[1]

def removed(self):
    """Files untracked on purpose."""
    return self._status[2]

def deleted(self):
    """Files missing from the working directory but still tracked."""
    return self._status[3]

def unknown(self):
    """Files present but not tracked."""
    return self._status[4]

def ignored(self):
    """Files matching the ignore rules."""
    return self._status[5]

def clean(self):
    """Files with no changes."""
    return self._status[6]
def branch(self):
    """Return the branch name, converted to the local encoding."""
    return encoding.tolocal(self._extra['branch'])

def closesbranch(self):
    """True if this commit is marked as closing its branch."""
    return 'close' in self._extra

def extra(self):
    """Return the extra-metadata dictionary."""
    return self._extra
1114
1114
def tags(self):
    """Return the concatenation of all parents' tags."""
    result = []
    for parent in self.parents():
        result.extend(parent.tags())
    return result
1120
1120
def bookmarks(self):
    """Return the concatenation of all parents' bookmarks."""
    marks = []
    for parent in self.parents():
        marks.extend(parent.bookmarks())
    return marks
1126
1126
def phase(self):
    """Return the phase this commit would get: at least draft, and never
    lower than any parent's phase."""
    ph = phases.draft  # default phase to draft
    for parent in self.parents():
        ph = max(ph, parent.phase())
    return ph
1132
1132
def hidden(self):
    """An uncommitted context is never hidden."""
    return False

def children(self):
    """An uncommitted context has no children."""
    return []
1138
1138
def flags(self, path):
    """Return the flag string for path, or '' if the file is absent.

    Prefers the already-built manifest when one is cached on the
    instance; otherwise probes via self._flagfunc.
    """
    if '_manifest' in self.__dict__:
        try:
            return self._manifest.flags(path)
        except KeyError:
            # not in the manifest -> no flags
            return ''

    try:
        return self._flagfunc(path)
    except OSError:
        # file vanished from the working directory
        return ''
1150
1150
def ancestor(self, c2):
    """return the "best" ancestor context of self and c2"""
    # punt on two parents for now
    return self._parents[0].ancestor(c2)
1154
1154
def walk(self, match):
    """Walk the dirstate with the given matcher; return sorted names."""
    dirstate = self._repo.dirstate
    return sorted(dirstate.walk(match, sorted(self.substate), True, False))
1158
1158
def matches(self, match):
    """Return the sorted dirstate files matching the given matcher."""
    return sorted(self._repo.dirstate.matches(match))
1161
1161
def ancestors(self):
    """Yield a changectx for each ancestor of this context's parents."""
    cl = self._repo.changelog
    for rev in cl.ancestors([p.rev() for p in self._parents]):
        yield changectx(self._repo, rev)
1166
1166
def markcommitted(self, node):
    """Perform post-commit cleanup necessary after committing this ctx

    Specifically, this updates backing stores this working context
    wraps to reflect the fact that the changes reflected by this
    workingctx have been committed. For example, it marks
    modified and added files as normal in the dirstate.
    """
    ds = self._repo.dirstate
    ds.beginparentchange()
    for f in self.modified() + self.added():
        ds.normal(f)
    for f in self.removed():
        ds.drop(f)
    ds.setparents(node)
    ds.endparentchange()
1184
1184
def dirs(self):
    """Return the dirstate's directory map."""
    return self._repo.dirstate.dirs()
1187
1187
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
       or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1200
1200
def __iter__(self):
    """Iterate over tracked files: every dirstate entry except those in
    state 'r' (scheduled for removal)."""
    ds = self._repo.dirstate
    for f in ds:
        if ds[f] != 'r':
            yield f
1206
1206
def __contains__(self, key):
    """A file is in the working context unless it is untracked ('?') or
    scheduled for removal ('r')."""
    return self._repo.dirstate[key] not in "?r"
1209
1209
@propertycache
def _parents(self):
    """Parent changectxs of the working directory; a null second parent
    is dropped."""
    p = self._repo.dirstate.parents()
    if p[1] == nullid:
        p = p[:-1]
    return [changectx(self._repo, x) for x in p]
1216
1216
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1221
1221
def dirty(self, missing=False, merge=True, branch=True):
    "check whether a working directory is modified"
    # check subrepos first
    for s in sorted(self.substate):
        if self.sub(s).dirty():
            return True
    # check current working dir: a pending merge, a branch change, any
    # file-level change, and optionally missing (deleted) files all
    # count as dirty
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1233
1233
def add(self, list, prefix=""):
    """Schedule the given files for addition; return the rejected ones.

    Missing files and non-regular/non-symlink paths are rejected; very
    large files and already-tracked files only trigger a warning.
    ('list' shadows the builtin but is kept for interface compatibility.)
    """
    join = lambda f: os.path.join(prefix, f)
    wlock = self._repo.wlock()
    ui, ds = self._repo.ui, self._repo.dirstate
    try:
        rejected = []
        lstat = self._repo.wvfs.lstat
        for f in list:
            # refuse names that won't round-trip on other platforms
            scmutil.checkportable(ui, join(f))
            try:
                st = lstat(f)
            except OSError:
                ui.warn(_("%s does not exist!\n") % join(f))
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (f, 3 * st.st_size // 1000000, join(f)))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % join(f))
                rejected.append(f)
            elif ds[f] in 'amn':
                ui.warn(_("%s already tracked!\n") % join(f))
            elif ds[f] == 'r':
                # was scheduled for removal; re-adding makes it normal
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
    finally:
        wlock.release()
1268
1268
def forget(self, files, prefix=""):
    """Stop tracking the given files; return those not tracked at all.

    Files in state 'a' (added, never committed) are dropped outright;
    any other tracked file is scheduled for removal.
    """
    join = lambda f: os.path.join(prefix, f)
    wlock = self._repo.wlock()
    try:
        rejected = []
        for f in files:
            if f not in self._repo.dirstate:
                self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                rejected.append(f)
            elif self._repo.dirstate[f] != 'a':
                self._repo.dirstate.remove(f)
            else:
                self._repo.dirstate.drop(f)
        return rejected
    finally:
        wlock.release()
1285
1285
def undelete(self, list):
    """Restore files scheduled for removal from a parent's contents."""
    pctxs = self.parents()
    wlock = self._repo.wlock()
    try:
        for f in list:
            if self._repo.dirstate[f] != 'r':
                self._repo.ui.warn(_("%s not removed!\n") % f)
            else:
                # take the file from whichever parent has it
                fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                t = fctx.data()
                self._repo.wwrite(f, t, fctx.flags())
                self._repo.dirstate.normal(f)
    finally:
        wlock.release()
1300
1300
def copy(self, source, dest):
    """Record in the dirstate that dest was copied from source.

    Warns and returns without recording anything if dest is missing or
    is not a regular file / symlink.
    """
    try:
        st = self._repo.wvfs.lstat(dest)
    except OSError as err:  # 'as' form: works on py2.6+ and py3
        if err.errno != errno.ENOENT:
            raise
        self._repo.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
        self._repo.ui.warn(_("copy failed: %s is not a file or a "
                             "symbolic link\n") % dest)
    else:
        wlock = self._repo.wlock()
        try:
            ds = self._repo.dirstate
            if ds[dest] in '?r':
                # dest untracked or removed: start tracking it first
                ds.add(dest)
            ds.copy(source, dest)
        finally:
            wlock.release()
1320
1320
def _filtersuspectsymlink(self, files):
    """Drop files flagged 'l' whose content cannot be a symlink target.

    Symlink placeholders may get non-symlink-like contents via user
    error or dereferencing by NFS or Samba servers, so filter out any
    placeholder that doesn't look like a symlink.  No-op when the
    filesystem supports symlinks.
    """
    if not files or self._repo.dirstate._checklink:
        return files

    sane = []
    for f in files:
        if self.flags(f) == 'l':
            d = self[f].data()
            if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                self._repo.ui.debug('ignoring suspect symlink placeholder'
                                    ' "%s"\n' % f)
                continue
        sane.append(f)
    return sane
1339
1339
def _checklookup(self, files):
    """Recheck possibly-clean files; return (modified, fixup).

    'modified' lists files whose content really differs from the first
    parent; 'fixup' lists files that turned out to be clean, whose
    dirstate entries are refreshed opportunistically (without waiting
    for the lock).
    """
    # check for any possibly clean files
    if not files:
        return [], []

    modified = []
    fixup = []
    pctx = self._parents[0]
    # do a full compare of any files that might have changed
    for f in sorted(files):
        if (f not in pctx or self.flags(f) != pctx.flags(f)
            or pctx[f].cmp(self[f])):
            modified.append(f)
        else:
            fixup.append(f)

    # update dirstate for files that are actually clean
    if fixup:
        try:
            # updating the dirstate is optional
            # so we don't wait on the lock
            # wlock can invalidate the dirstate, so cache normal _after_
            # taking the lock
            wlock = self._repo.wlock(False)
            normal = self._repo.dirstate.normal
            try:
                for f in fixup:
                    normal(f)
            finally:
                wlock.release()
        except error.LockError:
            pass
    return modified, fixup
1373
1373
def _manifestmatches(self, match, s):
    """Slow path for workingctx

    The fast path is when we compare the working directory to its parent
    which means this function is comparing with a non-parent; therefore we
    need to build a manifest and return what matches.
    """
    mf = self._repo['.']._manifestmatches(match, s)
    modified, added, removed = s[0:3]
    for f in modified + added:
        mf[f] = None
        mf.set(f, self.flags(f))
    for f in removed:
        if f in mf:
            del mf[f]
    return mf
1390
1390
def _prestatus(self, other, s, match, listignored, listclean, listunknown):
    """override the parent hook with a dirstate query

    We use this prestatus hook to populate the status with information from
    the dirstate.
    """
    # doesn't need to call super; if that changes, be aware that super
    # calls self.manifest which would slow down the common case of calling
    # status against a workingctx's parent
    return self._dirstatestatus(match, listignored, listclean, listunknown)
1401
1401
def _poststatus(self, other, s, match, listignored, listclean, listunknown):
    """override the parent hook with a filter for suspect symlinks

    We use this poststatus hook to filter out symlinks that might have
    accidentally ended up with the entire contents of the file they are
    supposed to be linking to.
    """
    s[0] = self._filtersuspectsymlink(s[0])
    # cache the (filtered) result for the accessors
    self._status = s[:]
    return s
1412
1412
def _dirstatestatus(self, match=None, ignored=False, clean=False,
                    unknown=False):
    '''Gets the status from the dirstate -- internal use only.'''
    listignored, listclean, listunknown = ignored, clean, unknown
    match = match or matchmod.always(self._repo.root, self._repo.getcwd())
    subrepos = []
    if '.hgsub' in self:
        subrepos = sorted(self.substate)
    cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                        listclean, listunknown)
    modified, added, removed, deleted, unknown, ignored, clean = s

    # check for any possibly clean files
    if cmp:
        modified2, fixup = self._checklookup(cmp)
        modified += modified2

        # update dirstate for files that are actually clean
        if fixup and listclean:
            clean += fixup

    return [modified, added, removed, deleted, unknown, ignored, clean]
1435
1435
def _buildstatus(self, other, s, match, listignored, listclean,
                 listunknown):
    """build a status with respect to another context

    This includes logic for maintaining the fast path of status when
    comparing the working directory against its parent, which is to skip
    building a new manifest if self (working directory) is not comparing
    against its parent (repo['.']).
    """
    if other != self._repo['.']:
        s = super(workingctx, self)._buildstatus(other, s, match,
                                                 listignored, listclean,
                                                 listunknown)
    return s
1450
1450
def _matchstatus(self, other, s, match, listignored, listclean,
                 listunknown):
    """override the match method with a filter for directory patterns

    We use inheritance to customize the match.bad method only in cases of
    workingctx since it belongs only to the working directory when
    comparing against the parent changeset.

    If we aren't comparing against the working directory's parent, then we
    just use the default match object sent to us.
    """
    superself = super(workingctx, self)
    match = superself._matchstatus(other, s, match, listignored, listclean,
                                   listunknown)
    if other != self._repo['.']:
        def bad(f, msg):
            # 'f' may be a directory pattern from 'match.files()',
            # so 'f not in ctx1' is not enough
            if f not in other and f not in other.dirs():
                self._repo.ui.warn('%s: %s\n' %
                                   (self._repo.dirstate.pathto(f), msg))
        match.bad = bad
    return match
1474
1474
def status(self, other='.', match=None, listignored=False,
           listclean=False, listunknown=False, listsubrepos=False):
    """Return the status of the working directory against 'other'."""
    # yet to be determined: what to do if 'other' is a 'workingctx' or a
    # 'memctx'?
    s = super(workingctx, self).status(other, match, listignored, listclean,
                                       listunknown, listsubrepos)
    # calling 'super' subtly reversed the contexts, so we flip the results
    # (s[1] is 'added' and s[2] is 'removed')
    s = list(s)
    s[1], s[2] = s[2], s[1]
    return scmutil.status(*s)
1486
1486
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copied file: single parent is the copy source
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (null node)
        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file context has no children
        return []
1527
1527
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source path, source filenode) if copied, else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        """Return the file's on-disk size in bytes."""
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """Return the file's mtime paired with the changectx's timezone;
        fall back to the changectx date if the file is missing."""
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError as err:  # 'as' form: works on py2.6+ and py3
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1573
1573
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        # rev/node stay unset until this context is actually committed
        self._rev = None
        self._node = None
        # substitute the null revision for any missing (None) parent
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        # NOTE(review): all touched files go in the first slot of this
        # status list; slot layout presumably mirrors the status tuple
        # consumed by committablectx — confirm against the base class
        self._status = [files, [], [], [], []]
        self._filectxfn = filectxfn
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            # filectxfn was given as a mapping of path -> filectx; adapt
            # it to the callable interface commitctx expects
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx

        # normalize the extra metadata; every commit carries a branch name
        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            # let the user edit the commit message, and stash it so it can
            # be recovered if the commit fails
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f, fnode in man.iteritems():
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
            if len(p) > 1:
                p2node = p[1].node()
            # recompute the expected file node from the in-memory content
            # plus the parent file nodes found above
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        return man
1671
1671
1672
1672
class memfilectx(committablefilectx):
    """An in-memory file to be committed as part of a memctx.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # encode the symlink/exec bits as the conventional flag string
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        # a copy source is tracked as a (path, filenode) pair; nullid is
        # used because the source has no committed node yet
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def size(self):
        return len(self.data())

    def flags(self):
        return self._flags

    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # TODO: in-memory removal semantics are still undecided
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
@@ -1,1595 +1,1596 b''
1 # subrepo.py - sub-repository handling for Mercurial
1 # subrepo.py - sub-repository handling for Mercurial
2 #
2 #
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno, os, re, shutil, posixpath, sys
8 import errno, os, re, shutil, posixpath, sys
9 import xml.dom.minidom
9 import xml.dom.minidom
10 import stat, subprocess, tarfile
10 import stat, subprocess, tarfile
11 from i18n import _
11 from i18n import _
12 import config, util, node, error, cmdutil, match as matchmod
12 import config, util, node, error, cmdutil, scmutil, match as matchmod
13 import phases
13 import phases
14 import pathutil
14 import pathutil
15 import exchange
15 import exchange
16 hg = None
16 hg = None
17 propertycache = util.propertycache
17 propertycache = util.propertycache
18
18
19 nullstate = ('', '', 'empty')
19 nullstate = ('', '', 'empty')
20
20
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expanded = util.urllocalpath(util.expandpath(path))
    parsed = util.url(expanded)
    if parsed.scheme:
        # a real URL (http://, ssh://, ...): return the input untouched
        return path
    # a plain filesystem path: normalize to an absolute form
    return util.normpath(os.path.abspath(parsed.path))
30
30
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    # hash the canonical form of the path so equivalent spellings of the
    # same remote share a single cache file; 12 hex digits suffice
    canonical = _expandedabspath(remotepath)
    return util.sha1(canonical).hexdigest()[0:12]
34
34
def _calcfilehash(filename):
    '''return the SHA-1 hex digest of filename's content

    A missing file hashes as the empty string, so callers get a stable
    value either way.'''
    content = ''
    if os.path.exists(filename):
        # the context manager closes the file even if read() fails,
        # matching the original try/finally
        with open(filename, 'rb') as fobj:
            content = fobj.read()
    return util.sha1(content).hexdigest()
44
44
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kw):
        error.Abort.__init__(self, *args, **kw)
        # path of the subrepo the failure originated in (may be None)
        self.subrepo = kw.get('subrepo')
        # original sys.exc_info() triple when wrapping another exception
        self.cause = kw.get('cause')
51
51
def annotatesubrepoerror(func):
    """Decorator annotating error.Abort raised from a subrepo method.

    Any error.Abort escaping ``func`` is re-raised as a SubrepoAbort
    carrying the subrepo's relative path and the original exc_info, so
    outer layers can report which subrepo failed without handling the
    same error twice (SubrepoAbort itself is passed straight through).
    """
    def decoratedmethod(self, *args, **kargs):
        try:
            res = func(self, *args, **kargs)
        except SubrepoAbort, ex:
            # This exception has already been handled
            raise ex
        except error.Abort, ex:
            subrepo = subrelpath(self)
            errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
        return res
    return decoratedmethod
67
67
def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    # parse a spec file from the context; ``read`` is passed back to
    # config.parse so %include directives recurse through it
    def read(f, sections=None, remap=None):
        if f in ctx:
            try:
                data = ctx[f].data()
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
                # handle missing subrepo spec files as removed
                ui.warn(_("warning: subrepo spec file %s not found\n") % f)
                return
            p.parse(f, data, sections, remap, read)
        else:
            raise util.Abort(_("subrepo spec file %s not found") % f)

    if '.hgsub' in ctx:
        read('.hgsub')

    # [subpaths] from the user's config overrides entries from the files
    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))

    # collect pinned revisions per subrepo path from .hgsubstate
    rev = {}
    if '.hgsubstate' in ctx:
        try:
            for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
                l = l.lstrip()
                if not l:
                    continue
                try:
                    revision, path = l.split(" ", 1)
                except ValueError:
                    raise util.Abort(_("invalid subrepository revision "
                                       "specifier in .hgsubstate line %d")
                                     % (i + 1))
                rev[path] = revision
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise

    # apply every [subpaths] pattern, in order, to a source string
    def remap(src):
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = repl.encode('string-escape')
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error, e:
                raise util.Abort(_("bad subrepository pattern in %s: %s")
                                 % (p.source('subpaths', pattern), e))
        return src

    state = {}
    for path, src in p[''].items():
        # sources may carry a "[kind]" prefix selecting the subrepo type
        kind = 'hg'
        if src.startswith('['):
            if ']' not in src:
                raise util.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
            src = src.lstrip() # strip any extra whitespace after ']'

        if not util.url(src).isabs():
            parent = _abssource(ctx._repo, abort=False)
            if parent:
                parent = util.url(parent)
                parent.path = posixpath.join(parent.path or '', src)
                parent.path = posixpath.normpath(parent.path)
                joined = str(parent)
                # Remap the full joined path and use it if it changes,
                # else remap the original source.
                remapped = remap(joined)
                if remapped == joined:
                    src = remap(src)
                else:
                    src = remapped

        # NOTE(review): when the branch above already ran remap(src), this
        # applies the subpaths patterns a second time — presumably the
        # patterns are expected to be idempotent; confirm before changing
        src = remap(src)
        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)

    return state
157
157
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    # one "<revision> <path>" line per subrepo, sorted by path so the
    # file content is deterministic
    content = ''.join('%s %s\n' % (state[path][1], path)
                      for path in sorted(state))
    repo.wwrite('.hgsubstate', content, '')
162
162
def submerge(repo, wctx, mctx, actx, overwrite):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context"""
    if mctx == actx: # backwards?
        actx = wctx.p1()
    # s1: local (working) substate, s2: remote (merging) substate,
    # sa: ancestor substate, sm: the merged result being built
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug("  subrepo %s: %s %s\n" % (s, msg, r))

    # first pass: every subrepo present locally
    for s, l in sorted(s1.iteritems()):
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?'
                      '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                # both revisions changed: ask the user to merge, or to
                # keep one side
                debug(s, "both sides changed")
                srepo = wctx.sub(s)
                option = repo.ui.promptchoice(
                    _(' subrepository %s diverged (local revision: %s, '
                      'remote revision: %s)\n'
                      '(M)erge, keep (l)ocal or keep (r)emote?'
                      '$$ &Merge $$ &Local $$ &Remote')
                    % (s, srepo.shortid(l[1]), srepo.shortid(r[1])), 0)
                if option == 0:
                    wctx.sub(s).merge(r)
                    sm[s] = l
                    debug(s, "merge with", r)
                elif option == 1:
                    sm[s] = l
                    debug(s, "keep local subrepo revision", l)
                else:
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
                    debug(s, "get remote subrepo revision", r)
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            # local changed a subrepo the remote removed
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    # second pass: subrepos only present on the remote side
    for s, r in sorted(s2.items()):
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            # remote changed a subrepo the local side removed
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0) == 0:
                debug(s, "prompt recreate", r)
                wctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
    return sm
263
263
def _updateprompt(ui, sub, dirty, local, remote):
    """Ask whether to keep the local or the remote subrepo source.

    Returns the index chosen by ui.promptchoice (0 = local).
    """
    # pick the message matching whether the subrepo has local changes
    if dirty:
        template = _(' subrepository sources for %s differ\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    else:
        template = _(' subrepository sources for %s differ (in checked out '
                     'version)\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    return ui.promptchoice(template % (subrelpath(sub), local, remote), 0)
277
277
def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    # walk up the _subparent chain to find the outermost repository
    outer = repo
    while util.safehasattr(outer, '_subparent'):
        outer = outer._subparent
    # strip the outer root (with trailing separator) off our own root
    prefix = pathutil.normasprefix(outer.root)
    return repo.root[len(prefix):]
284
284
def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    if util.safehasattr(sub, '_relpath'):
        # precomputed by the subrepo itself
        return sub._relpath
    if util.safehasattr(sub, '_repo'):
        return reporelpath(sub._repo)
    # no backing repo object: fall back to the raw path
    return sub._path
292
292
def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    if util.safehasattr(repo, '_subparent'):
        # we are a subrepo: resolve our source relative to the parent,
        # recursing upward until an absolute URL is produced
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        if util.safehasattr(repo, '_subtoppath'):
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
        if repo.sharedpath != repo.path:
            # chop off the .hg component to get the default path form
            return os.path.dirname(repo.sharedpath)
    # nothing resolved above; abort or let the caller handle None
    if abort:
        raise util.Abort(_("default path for subrepository not found"))
319
319
def _sanitize(ui, path, ignore):
    """Walk ``path`` and delete any 'hgrc' file found under a '.hg' dir,
    skipping directories whose lowercased name equals ``ignore``."""
    for dirpath, subdirs, filenames in os.walk(path):
        # prune the first subdirectory matching `ignore` (compared
        # case-insensitively) so the walk never descends into it
        for idx, sub in enumerate(subdirs):
            if sub.lower() == ignore:
                del subdirs[idx]
                break
        if os.path.basename(dirpath).lower() != '.hg':
            continue
        # inside a .hg directory: remove any hgrc, which could carry
        # hostile settings (e.g. hooks) from an untrusted source
        for name in filenames:
            if name.lower() != 'hgrc':
                continue
            ui.warn(_("warning: removing potentially hostile 'hgrc' "
                      "in '%s'\n") % dirpath)
            os.unlink(os.path.join(dirpath, name))
333
333
def subrepo(ctx, path):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    import hg as h
    hg = h

    # refuse path components that escape the repository root
    pathutil.pathauditor(ctx._repo.root)(path)
    state = ctx.substate[path]
    # state[2] is the subrepo kind; ``types`` is the module-level
    # kind -> class registry
    if state[2] not in types:
        raise util.Abort(_('unknown subrepo type %s') % state[2])
    return types[state[2]](ctx, path, state[:2])
349
349
def newcommitphase(ui, ctx):
    """Return the phase to use for a new commit of ``ctx``.

    Starts from the configured new-commit phase; depending on the
    phases.checksubrepos setting ('ignore', 'follow' or 'abort'), the
    result may be raised to match the least-public subrepo revision,
    or the commit may be refused outright.
    """
    commitphase = phases.newcommitphase(ui)
    substate = getattr(ctx, "substate", None)
    if not substate:
        # no subrepos: nothing to reconcile
        return commitphase
    check = ui.config('phases', 'checksubrepos', 'follow')
    if check not in ('ignore', 'follow', 'abort'):
        raise util.Abort(_('invalid phases.checksubrepos configuration: %s')
                         % (check))
    if check == 'ignore':
        return commitphase
    # find the least-public phase among all pinned subrepo revisions
    maxphase = phases.public
    maxsub = None
    for subpath in sorted(substate):
        subphase = ctx.sub(subpath).phase(substate[subpath][1])
        if subphase > maxphase:
            maxphase = subphase
            maxsub = subpath
    if maxphase <= commitphase:
        return commitphase
    if check == 'abort':
        raise util.Abort(_("can't commit in %s phase"
                           " conflicting %s from subrepository %s") %
                         (phases.phasenames[commitphase],
                          phases.phasenames[maxphase], maxsub))
    ui.warn(_("warning: changes are committed in"
              " %s phase from subrepository %s\n") %
            (phases.phasenames[maxphase], maxsub))
    return maxphase
380
380
381 # subrepo classes need to implement the following abstract class:
381 # subrepo classes need to implement the following abstract class:
382
382
class abstractsubrepo(object):
    """Interface every subrepo backend must implement.

    Methods with a safe conservative answer have default implementations
    here (e.g. an empty status, exit code 1 for cat/outgoing/incoming);
    the rest raise NotImplementedError and must be overridden.
    """

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate.
        """
        raise NotImplementedError

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        # default: nothing added
        return []

    def cat(self, ui, match, prefix, **opts):
        # default: non-zero exit status (nothing could be catted)
        return 1

    def status(self, rev2, **opts):
        # default: an empty status object (no changes of any kind)
        return scmutil.status([], [], [], [], [], [], [])

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        # default: produce no diff output
        pass

    def outgoing(self, ui, dest, opts):
        return 1

    def incoming(self, ui, source, opts):
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name):
        """return file data"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def archive(self, ui, archiver, prefix, match=None):
        # Generic archive built on files()/fileflags()/filedata();
        # returns the number of files archived so callers can aggregate.
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0,
                    unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            # and/or idiom: 0755 for executables, 0644 otherwise
            mode = 'x' in flags and 0755 or 0644
            symlink = 'l' in flags
            archiver.addfile(os.path.join(prefix, self._path, name),
                             mode, symlink, self.filedata(name))
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'), total=total)
        ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''
        pass

    def forget(self, ui, match, prefix):
        # default: (forgotten, failed) - nothing to forget
        return ([], [])

    def revert(self, ui, substate, *pats, **opts):
        ui.warn('%s: reverting %s subrepos is unsupported\n' \
                % (substate[0], substate[2]))
        return []

    def shortid(self, revid):
        # backends may override to abbreviate revision identifiers
        return revid
511
511
class hgsubrepo(abstractsubrepo):
    """Subrepository backed by a nested Mercurial repository."""

    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        r = ctx._repo
        root = r.wjoin(path)
        create = False
        if not os.path.exists(os.path.join(root, '.hg')):
            # no repository there yet - create one on first use
            create = True
            util.makedirs(root)
        self._repo = hg.repository(r.baseui, root, create=create)
        # propagate selected settings from the parent repo's ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self._repo.ui.setconfig(s, k, v, 'subrepo')
        self._repo.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
        self._initrepo(r, state[0], create)

    def storeclean(self, path):
        # take the repo lock so the store cannot change while we hash it
        lock = self._repo.lock()
        try:
            return self._storeclean(path)
        finally:
            lock.release()

    def _storeclean(self, path):
        # compare the cached store hash for 'path' line-by-line against a
        # freshly computed one; any mismatch or length difference is dirty
        clean = True
        itercache = self._calcstorehash(path)
        try:
            for filehash in self._readstorehashcache(path):
                if filehash != itercache.next():
                    clean = False
                    break
        except StopIteration:
            # the cached and current pull states have a different size
            clean = False
        if clean:
            try:
                itercache.next()
                # the cached and current pull states have a different size
                clean = False
            except StopIteration:
                pass
        return clean

    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to to detect when there are changes that may
        require a push to a given remote path.'''
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        yield '# %s\n' % _expandedabspath(remotepath)
        for relname in filelist:
            absname = os.path.normpath(self._repo.join(relname))
            yield '%s = %s\n' % (relname, _calcfilehash(absname))

    def _getstorehashcachepath(self, remotepath):
        '''get a unique path for the store hash cache'''
        return self._repo.join(os.path.join(
            'cache', 'storehash', _getstorehashcachename(remotepath)))

    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = self._getstorehashcachepath(remotepath)
        if not os.path.exists(cachefile):
            # no cache yet: '' iterates to no lines, like an empty file
            return ''
        fd = open(cachefile, 'r')
        try:
            pullstate = fd.readlines()
        finally:
            fd.close()
        return pullstate

    def _cachestorehash(self, remotepath):
        '''cache the current store hash

        Each remote repo requires its own store hash cache, because a subrepo
        store may be "clean" versus a given remote repo, but not versus another
        '''
        cachefile = self._getstorehashcachepath(remotepath)
        lock = self._repo.lock()
        try:
            storehash = list(self._calcstorehash(remotepath))
            cachedir = os.path.dirname(cachefile)
            if not os.path.exists(cachedir):
                util.makedirs(cachedir, notindexed=True)
            fd = open(cachefile, 'w')
            try:
                fd.writelines(storehash)
            finally:
                fd.close()
        finally:
            lock.release()

    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        # link the subrepo back to its parent and, on creation, record the
        # default (and default-push) paths in the subrepo's hgrc
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            lines = ['[paths]\n']

            def addpathconfig(key, value):
                if value:
                    lines.append('%s = %s\n' % (key, value))
                    self._repo.ui.setconfig('paths', key, value, 'subrepo')

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)

            fp = self._repo.opener("hgrc", "w", text=True)
            try:
                fp.write(''.join(lines))
            finally:
                fp.close()

    @annotatesubrepoerror
    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
                           os.path.join(prefix, self._path), explicitonly)

    @annotatesubrepoerror
    def cat(self, ui, match, prefix, **opts):
        rev = self._state[1]
        ctx = self._repo[rev]
        return cmdutil.cat(ui, self._repo, ctx, match, prefix, **opts)

    @annotatesubrepoerror
    def status(self, rev2, **opts):
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError, inst:
            # a missing revision degrades to a warning plus an empty status
            self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                               % (inst, subrelpath(self)))
            return scmutil.status([], [], [], [], [], [], [])

    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            cmdutil.diffordiffstat(ui, self._repo, diffopts,
                                   node1, node2, match,
                                   prefix=posixpath.join(prefix, self._path),
                                   listsubrepos=True, **opts)
        except error.RepoLookupError, inst:
            self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                               % (inst, subrelpath(self)))

    @annotatesubrepoerror
    def archive(self, ui, archiver, prefix, match=None):
        # make sure the archived revision is present locally, then recurse
        # into nested subrepos so the archive is complete
        self._get(self._state + ('hg',))
        total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
        rev = self._state[1]
        ctx = self._repo[rev]
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath)
            submatch = matchmod.narrowmatcher(subpath, match)
            total += s.archive(
                ui, archiver, os.path.join(prefix, self._path), submatch)
        return total

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        r = self._state[1]
        if r == '' and not ignoreupdate: # no state recorded
            return True
        w = self._repo[None]
        if r != w.p1().hex() and not ignoreupdate:
            # different version checked out
            return True
        return w.dirty() # working directory changed

    def basestate(self):
        return self._repo['.'].hex()

    def checknested(self, path):
        return self._repo._checknested(self._repo.wjoin(path))

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)

    @annotatesubrepoerror
    def phase(self, state):
        return self._repo[state].phase()

    @annotatesubrepoerror
    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        # ensure state's revision is available locally, cloning or pulling
        # from the subrepo source as needed; returns True if the revision
        # was already present
        source, revision, kind = state
        if revision in self._repo.unfiltered():
            return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            # empty local repo: replace it wholesale with a clone
            self._repo.ui.status(_('cloning subrepo %s from %s\n')
                                 % (subrelpath(self), srcurl))
            parentrepo = self._repo._subparent
            shutil.rmtree(self._repo.path)
            other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                     other, self._repo.root,
                                     update=False)
            self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self._repo.ui.status(_('pulling subrepo %s from %s\n')
                                 % (subrelpath(self), srcurl))
            cleansub = self.storeclean(srcurl)
            exchange.pull(self._repo, other)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            # warn (via the unfiltered repo) when updating to a hidden commit
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                urepo.ui.warn(
                    _('revision %s in subrepo %s is hidden\n') \
                    % (revision[0:12], self._path))
                repo = urepo
        hg.updaterepo(repo, revision, overwrite)

    @annotatesubrepoerror
    def merge(self, state):
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            # fast-forward when possible, skip when already merged,
            # otherwise do a real merge
            if anc == cur and dst.branch() == cur.branch():
                self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                # local changes at risk: ask the user before proceeding
                if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()

    @annotatesubrepoerror
    def push(self, opts):
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                # store hash says nothing changed since the last push here
                self._repo.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self._repo.ui.status(_('pushing subrepo %s to %s\n') %
                             (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = exchange.push(self._repo, other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res.cgresult

    @annotatesubrepoerror
    def outgoing(self, ui, dest, opts):
        return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)

    @annotatesubrepoerror
    def incoming(self, ui, source, opts):
        return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)

    @annotatesubrepoerror
    def files(self):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.manifest()

    def filedata(self, name):
        rev = self._state[1]
        return self._repo[rev][name].data()

    def fileflags(self, name):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.flags(name)

    def walk(self, match):
        ctx = self._repo[None]
        return ctx.walk(match)

    @annotatesubrepoerror
    def forget(self, ui, match, prefix):
        return cmdutil.forget(ui, self._repo, match,
                              os.path.join(prefix, self._path), True)

    @annotatesubrepoerror
    def revert(self, ui, substate, *pats, **opts):
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            pats = []
            if not opts.get('all'):
                pats = ['set:modified()']
            self.filerevert(ui, *pats, **opts)

        # Update the repo to the revision specified in the given substate
        self.get(substate, overwrite=True)

    def filerevert(self, ui, *pats, **opts):
        # revert files in place against opts['rev'] (set by revert() above)
        ctx = self._repo[opts['rev']]
        parents = self._repo.dirstate.parents()
        if opts.get('all'):
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(ui, self._repo, ctx, parents, *pats, **opts)

    def shortid(self, revid):
        # abbreviate a full hex node to the conventional 12 characters
        return revid[:12]
891
891
class svnsubrepo(abstractsubrepo):
    """Subrepository backed by a Subversion working copy.

    Every operation shells out to the external 'svn' binary; its absence
    aborts construction.
    """

    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui
        self._exe = util.findexe('svn')
        if not self._exe:
            raise util.Abort(_("'svn' executable not found for subrepo '%s'")
                             % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        """Run an svn command and return (stdout, stderr).

        Unless *failok* is set, a nonzero exit status aborts; otherwise
        stderr is merely echoed as a warning.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self._ui.interactive():
            # Making stdin a pipe should keep svn from prompting even when
            # we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # only pass it to the subcommands known to accept it.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = os.path.join(self._ctx._repo.origroot, self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise util.Abort(stderr or 'exited with code %d' % p.returncode)
            if stderr:
                self._ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        # (major, minor) of the svn client, parsed from `svn --version`.
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(r'^(\d+)\.(\d+)', output)
        if not m:
            raise util.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        # Returns (lastrev, rev) as strings, '0' when unknown.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        # Last-committed revision of the working copy.
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        # A change located inside an external entry makes the whole
        # working copy count as externally changed.
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    def dirty(self, ignoreupdate=False):
        # Clean only when there are no content changes and (unless
        # ignoreupdate) the checkout matches the recorded state.
        if not self._wcchanged()[0]:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise util.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise util.Abort(_('failed to commit svn changes'))
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self._ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        # Refuse to delete a checkout that still carries modifications.
        if self.dirty():
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        self._ui.note(_('removing subrepo %s\n') % self._path)

        def onerror(function, path, excinfo):
            if function is not os.remove:
                raise
            # read-only files cannot be unlinked under Windows
            s = os.stat(path)
            if (s.st_mode & stat.S_IWRITE) != 0:
                raise
            os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
            os.remove(path)

        path = self._ctx._repo.wjoin(self._path)
        shutil.rmtree(path, onerror=onerror)
        try:
            # Prune now-empty parent directories; harmless if they are not.
            os.removedirs(os.path.dirname(path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        _sanitize(self._ui, self._ctx._repo.wjoin(self._path), '.svn')
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise util.Abort((status or err).splitlines()[-1])
        self._ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self._ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        # List all versioned files (recursively) as UTF-8 encoded paths.
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name):
        # Raw contents of a single versioned file via `svn cat`.
        return self._svncommand(['cat'], name)[0]


1122 class gitsubrepo(abstractsubrepo):
1122 class gitsubrepo(abstractsubrepo):
1123 def __init__(self, ctx, path, state):
1123 def __init__(self, ctx, path, state):
1124 self._state = state
1124 self._state = state
1125 self._ctx = ctx
1125 self._ctx = ctx
1126 self._path = path
1126 self._path = path
1127 self._relpath = os.path.join(reporelpath(ctx._repo), path)
1127 self._relpath = os.path.join(reporelpath(ctx._repo), path)
1128 self._abspath = ctx._repo.wjoin(path)
1128 self._abspath = ctx._repo.wjoin(path)
1129 self._subparent = ctx._repo
1129 self._subparent = ctx._repo
1130 self._ui = ctx._repo.ui
1130 self._ui = ctx._repo.ui
1131 self._ensuregit()
1131 self._ensuregit()
1132
1132
1133 def _ensuregit(self):
1133 def _ensuregit(self):
1134 try:
1134 try:
1135 self._gitexecutable = 'git'
1135 self._gitexecutable = 'git'
1136 out, err = self._gitnodir(['--version'])
1136 out, err = self._gitnodir(['--version'])
1137 except OSError, e:
1137 except OSError, e:
1138 if e.errno != 2 or os.name != 'nt':
1138 if e.errno != 2 or os.name != 'nt':
1139 raise
1139 raise
1140 self._gitexecutable = 'git.cmd'
1140 self._gitexecutable = 'git.cmd'
1141 out, err = self._gitnodir(['--version'])
1141 out, err = self._gitnodir(['--version'])
1142 versionstatus = self._checkversion(out)
1142 versionstatus = self._checkversion(out)
1143 if versionstatus == 'unknown':
1143 if versionstatus == 'unknown':
1144 self._ui.warn(_('cannot retrieve git version\n'))
1144 self._ui.warn(_('cannot retrieve git version\n'))
1145 elif versionstatus == 'abort':
1145 elif versionstatus == 'abort':
1146 raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
1146 raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
1147 elif versionstatus == 'warning':
1147 elif versionstatus == 'warning':
1148 self._ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1148 self._ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1149
1149
1150 @staticmethod
1150 @staticmethod
1151 def _checkversion(out):
1151 def _checkversion(out):
1152 '''ensure git version is new enough
1152 '''ensure git version is new enough
1153
1153
1154 >>> _checkversion = gitsubrepo._checkversion
1154 >>> _checkversion = gitsubrepo._checkversion
1155 >>> _checkversion('git version 1.6.0')
1155 >>> _checkversion('git version 1.6.0')
1156 'ok'
1156 'ok'
1157 >>> _checkversion('git version 1.8.5')
1157 >>> _checkversion('git version 1.8.5')
1158 'ok'
1158 'ok'
1159 >>> _checkversion('git version 1.4.0')
1159 >>> _checkversion('git version 1.4.0')
1160 'abort'
1160 'abort'
1161 >>> _checkversion('git version 1.5.0')
1161 >>> _checkversion('git version 1.5.0')
1162 'warning'
1162 'warning'
1163 >>> _checkversion('git version 1.9-rc0')
1163 >>> _checkversion('git version 1.9-rc0')
1164 'ok'
1164 'ok'
1165 >>> _checkversion('git version 1.9.0.265.g81cdec2')
1165 >>> _checkversion('git version 1.9.0.265.g81cdec2')
1166 'ok'
1166 'ok'
1167 >>> _checkversion('git version 1.9.0.GIT')
1167 >>> _checkversion('git version 1.9.0.GIT')
1168 'ok'
1168 'ok'
1169 >>> _checkversion('git version 12345')
1169 >>> _checkversion('git version 12345')
1170 'unknown'
1170 'unknown'
1171 >>> _checkversion('no')
1171 >>> _checkversion('no')
1172 'unknown'
1172 'unknown'
1173 '''
1173 '''
1174 m = re.search(r'^git version (\d+)\.(\d+)', out)
1174 m = re.search(r'^git version (\d+)\.(\d+)', out)
1175 if not m:
1175 if not m:
1176 return 'unknown'
1176 return 'unknown'
1177 version = (int(m.group(1)), int(m.group(2)))
1177 version = (int(m.group(1)), int(m.group(2)))
1178 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1178 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1179 # despite the docstring comment. For now, error on 1.4.0, warn on
1179 # despite the docstring comment. For now, error on 1.4.0, warn on
1180 # 1.5.0 but attempt to continue.
1180 # 1.5.0 but attempt to continue.
1181 if version < (1, 5):
1181 if version < (1, 5):
1182 return 'abort'
1182 return 'abort'
1183 elif version < (1, 6):
1183 elif version < (1, 6):
1184 return 'warning'
1184 return 'warning'
1185 return 'ok'
1185 return 'ok'
1186
1186
1187 def _gitcommand(self, commands, env=None, stream=False):
1187 def _gitcommand(self, commands, env=None, stream=False):
1188 return self._gitdir(commands, env=env, stream=stream)[0]
1188 return self._gitdir(commands, env=env, stream=stream)[0]
1189
1189
1190 def _gitdir(self, commands, env=None, stream=False):
1190 def _gitdir(self, commands, env=None, stream=False):
1191 return self._gitnodir(commands, env=env, stream=stream,
1191 return self._gitnodir(commands, env=env, stream=stream,
1192 cwd=self._abspath)
1192 cwd=self._abspath)
1193
1193
1194 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
1194 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
1195 """Calls the git command
1195 """Calls the git command
1196
1196
1197 The methods tries to call the git command. versions prior to 1.6.0
1197 The methods tries to call the git command. versions prior to 1.6.0
1198 are not supported and very probably fail.
1198 are not supported and very probably fail.
1199 """
1199 """
1200 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
1200 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
1201 # unless ui.quiet is set, print git's stderr,
1201 # unless ui.quiet is set, print git's stderr,
1202 # which is mostly progress and useful info
1202 # which is mostly progress and useful info
1203 errpipe = None
1203 errpipe = None
1204 if self._ui.quiet:
1204 if self._ui.quiet:
1205 errpipe = open(os.devnull, 'w')
1205 errpipe = open(os.devnull, 'w')
1206 p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
1206 p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
1207 cwd=cwd, env=env, close_fds=util.closefds,
1207 cwd=cwd, env=env, close_fds=util.closefds,
1208 stdout=subprocess.PIPE, stderr=errpipe)
1208 stdout=subprocess.PIPE, stderr=errpipe)
1209 if stream:
1209 if stream:
1210 return p.stdout, None
1210 return p.stdout, None
1211
1211
1212 retdata = p.stdout.read().strip()
1212 retdata = p.stdout.read().strip()
1213 # wait for the child to exit to avoid race condition.
1213 # wait for the child to exit to avoid race condition.
1214 p.wait()
1214 p.wait()
1215
1215
1216 if p.returncode != 0 and p.returncode != 1:
1216 if p.returncode != 0 and p.returncode != 1:
1217 # there are certain error codes that are ok
1217 # there are certain error codes that are ok
1218 command = commands[0]
1218 command = commands[0]
1219 if command in ('cat-file', 'symbolic-ref'):
1219 if command in ('cat-file', 'symbolic-ref'):
1220 return retdata, p.returncode
1220 return retdata, p.returncode
1221 # for all others, abort
1221 # for all others, abort
1222 raise util.Abort('git %s error %d in %s' %
1222 raise util.Abort('git %s error %d in %s' %
1223 (command, p.returncode, self._relpath))
1223 (command, p.returncode, self._relpath))
1224
1224
1225 return retdata, p.returncode
1225 return retdata, p.returncode
1226
1226
1227 def _gitmissing(self):
1227 def _gitmissing(self):
1228 return not os.path.exists(os.path.join(self._abspath, '.git'))
1228 return not os.path.exists(os.path.join(self._abspath, '.git'))
1229
1229
1230 def _gitstate(self):
1230 def _gitstate(self):
1231 return self._gitcommand(['rev-parse', 'HEAD'])
1231 return self._gitcommand(['rev-parse', 'HEAD'])
1232
1232
1233 def _gitcurrentbranch(self):
1233 def _gitcurrentbranch(self):
1234 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1234 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1235 if err:
1235 if err:
1236 current = None
1236 current = None
1237 return current
1237 return current
1238
1238
1239 def _gitremote(self, remote):
1239 def _gitremote(self, remote):
1240 out = self._gitcommand(['remote', 'show', '-n', remote])
1240 out = self._gitcommand(['remote', 'show', '-n', remote])
1241 line = out.split('\n')[1]
1241 line = out.split('\n')[1]
1242 i = line.index('URL: ') + len('URL: ')
1242 i = line.index('URL: ') + len('URL: ')
1243 return line[i:]
1243 return line[i:]
1244
1244
1245 def _githavelocally(self, revision):
1245 def _githavelocally(self, revision):
1246 out, code = self._gitdir(['cat-file', '-e', revision])
1246 out, code = self._gitdir(['cat-file', '-e', revision])
1247 return code == 0
1247 return code == 0
1248
1248
1249 def _gitisancestor(self, r1, r2):
1249 def _gitisancestor(self, r1, r2):
1250 base = self._gitcommand(['merge-base', r1, r2])
1250 base = self._gitcommand(['merge-base', r1, r2])
1251 return base == r1
1251 return base == r1
1252
1252
1253 def _gitisbare(self):
1253 def _gitisbare(self):
1254 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1254 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1255
1255
1256 def _gitupdatestat(self):
1256 def _gitupdatestat(self):
1257 """This must be run before git diff-index.
1257 """This must be run before git diff-index.
1258 diff-index only looks at changes to file stat;
1258 diff-index only looks at changes to file stat;
1259 this command looks at file contents and updates the stat."""
1259 this command looks at file contents and updates the stat."""
1260 self._gitcommand(['update-index', '-q', '--refresh'])
1260 self._gitcommand(['update-index', '-q', '--refresh'])
1261
1261
1262 def _gitbranchmap(self):
1262 def _gitbranchmap(self):
1263 '''returns 2 things:
1263 '''returns 2 things:
1264 a map from git branch to revision
1264 a map from git branch to revision
1265 a map from revision to branches'''
1265 a map from revision to branches'''
1266 branch2rev = {}
1266 branch2rev = {}
1267 rev2branch = {}
1267 rev2branch = {}
1268
1268
1269 out = self._gitcommand(['for-each-ref', '--format',
1269 out = self._gitcommand(['for-each-ref', '--format',
1270 '%(objectname) %(refname)'])
1270 '%(objectname) %(refname)'])
1271 for line in out.split('\n'):
1271 for line in out.split('\n'):
1272 revision, ref = line.split(' ')
1272 revision, ref = line.split(' ')
1273 if (not ref.startswith('refs/heads/') and
1273 if (not ref.startswith('refs/heads/') and
1274 not ref.startswith('refs/remotes/')):
1274 not ref.startswith('refs/remotes/')):
1275 continue
1275 continue
1276 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1276 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1277 continue # ignore remote/HEAD redirects
1277 continue # ignore remote/HEAD redirects
1278 branch2rev[ref] = revision
1278 branch2rev[ref] = revision
1279 rev2branch.setdefault(revision, []).append(ref)
1279 rev2branch.setdefault(revision, []).append(ref)
1280 return branch2rev, rev2branch
1280 return branch2rev, rev2branch
1281
1281
1282 def _gittracking(self, branches):
1282 def _gittracking(self, branches):
1283 'return map of remote branch to local tracking branch'
1283 'return map of remote branch to local tracking branch'
1284 # assumes no more than one local tracking branch for each remote
1284 # assumes no more than one local tracking branch for each remote
1285 tracking = {}
1285 tracking = {}
1286 for b in branches:
1286 for b in branches:
1287 if b.startswith('refs/remotes/'):
1287 if b.startswith('refs/remotes/'):
1288 continue
1288 continue
1289 bname = b.split('/', 2)[2]
1289 bname = b.split('/', 2)[2]
1290 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
1290 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
1291 if remote:
1291 if remote:
1292 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
1292 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
1293 tracking['refs/remotes/%s/%s' %
1293 tracking['refs/remotes/%s/%s' %
1294 (remote, ref.split('/', 2)[2])] = b
1294 (remote, ref.split('/', 2)[2])] = b
1295 return tracking
1295 return tracking
1296
1296
1297 def _abssource(self, source):
1297 def _abssource(self, source):
1298 if '://' not in source:
1298 if '://' not in source:
1299 # recognize the scp syntax as an absolute source
1299 # recognize the scp syntax as an absolute source
1300 colon = source.find(':')
1300 colon = source.find(':')
1301 if colon != -1 and '/' not in source[:colon]:
1301 if colon != -1 and '/' not in source[:colon]:
1302 return source
1302 return source
1303 self._subsource = source
1303 self._subsource = source
1304 return _abssource(self)
1304 return _abssource(self)
1305
1305
1306 def _fetch(self, source, revision):
1306 def _fetch(self, source, revision):
1307 if self._gitmissing():
1307 if self._gitmissing():
1308 source = self._abssource(source)
1308 source = self._abssource(source)
1309 self._ui.status(_('cloning subrepo %s from %s\n') %
1309 self._ui.status(_('cloning subrepo %s from %s\n') %
1310 (self._relpath, source))
1310 (self._relpath, source))
1311 self._gitnodir(['clone', source, self._abspath])
1311 self._gitnodir(['clone', source, self._abspath])
1312 if self._githavelocally(revision):
1312 if self._githavelocally(revision):
1313 return
1313 return
1314 self._ui.status(_('pulling subrepo %s from %s\n') %
1314 self._ui.status(_('pulling subrepo %s from %s\n') %
1315 (self._relpath, self._gitremote('origin')))
1315 (self._relpath, self._gitremote('origin')))
1316 # try only origin: the originally cloned repo
1316 # try only origin: the originally cloned repo
1317 self._gitcommand(['fetch'])
1317 self._gitcommand(['fetch'])
1318 if not self._githavelocally(revision):
1318 if not self._githavelocally(revision):
1319 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
1319 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
1320 (revision, self._relpath))
1320 (revision, self._relpath))
1321
1321
1322 @annotatesubrepoerror
1322 @annotatesubrepoerror
1323 def dirty(self, ignoreupdate=False):
1323 def dirty(self, ignoreupdate=False):
1324 if self._gitmissing():
1324 if self._gitmissing():
1325 return self._state[1] != ''
1325 return self._state[1] != ''
1326 if self._gitisbare():
1326 if self._gitisbare():
1327 return True
1327 return True
1328 if not ignoreupdate and self._state[1] != self._gitstate():
1328 if not ignoreupdate and self._state[1] != self._gitstate():
1329 # different version checked out
1329 # different version checked out
1330 return True
1330 return True
1331 # check for staged changes or modified files; ignore untracked files
1331 # check for staged changes or modified files; ignore untracked files
1332 self._gitupdatestat()
1332 self._gitupdatestat()
1333 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1333 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1334 return code == 1
1334 return code == 1
1335
1335
1336 def basestate(self):
1336 def basestate(self):
1337 return self._gitstate()
1337 return self._gitstate()
1338
1338
1339 @annotatesubrepoerror
1339 @annotatesubrepoerror
1340 def get(self, state, overwrite=False):
1340 def get(self, state, overwrite=False):
1341 source, revision, kind = state
1341 source, revision, kind = state
1342 if not revision:
1342 if not revision:
1343 self.remove()
1343 self.remove()
1344 return
1344 return
1345 self._fetch(source, revision)
1345 self._fetch(source, revision)
1346 # if the repo was set to be bare, unbare it
1346 # if the repo was set to be bare, unbare it
1347 if self._gitisbare():
1347 if self._gitisbare():
1348 self._gitcommand(['config', 'core.bare', 'false'])
1348 self._gitcommand(['config', 'core.bare', 'false'])
1349 if self._gitstate() == revision:
1349 if self._gitstate() == revision:
1350 self._gitcommand(['reset', '--hard', 'HEAD'])
1350 self._gitcommand(['reset', '--hard', 'HEAD'])
1351 return
1351 return
1352 elif self._gitstate() == revision:
1352 elif self._gitstate() == revision:
1353 if overwrite:
1353 if overwrite:
1354 # first reset the index to unmark new files for commit, because
1354 # first reset the index to unmark new files for commit, because
1355 # reset --hard will otherwise throw away files added for commit,
1355 # reset --hard will otherwise throw away files added for commit,
1356 # not just unmark them.
1356 # not just unmark them.
1357 self._gitcommand(['reset', 'HEAD'])
1357 self._gitcommand(['reset', 'HEAD'])
1358 self._gitcommand(['reset', '--hard', 'HEAD'])
1358 self._gitcommand(['reset', '--hard', 'HEAD'])
1359 return
1359 return
1360 branch2rev, rev2branch = self._gitbranchmap()
1360 branch2rev, rev2branch = self._gitbranchmap()
1361
1361
1362 def checkout(args):
1362 def checkout(args):
1363 cmd = ['checkout']
1363 cmd = ['checkout']
1364 if overwrite:
1364 if overwrite:
1365 # first reset the index to unmark new files for commit, because
1365 # first reset the index to unmark new files for commit, because
1366 # the -f option will otherwise throw away files added for
1366 # the -f option will otherwise throw away files added for
1367 # commit, not just unmark them.
1367 # commit, not just unmark them.
1368 self._gitcommand(['reset', 'HEAD'])
1368 self._gitcommand(['reset', 'HEAD'])
1369 cmd.append('-f')
1369 cmd.append('-f')
1370 self._gitcommand(cmd + args)
1370 self._gitcommand(cmd + args)
1371 _sanitize(self._ui, self._abspath, '.git')
1371 _sanitize(self._ui, self._abspath, '.git')
1372
1372
1373 def rawcheckout():
1373 def rawcheckout():
1374 # no branch to checkout, check it out with no branch
1374 # no branch to checkout, check it out with no branch
1375 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
1375 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
1376 self._relpath)
1376 self._relpath)
1377 self._ui.warn(_('check out a git branch if you intend '
1377 self._ui.warn(_('check out a git branch if you intend '
1378 'to make changes\n'))
1378 'to make changes\n'))
1379 checkout(['-q', revision])
1379 checkout(['-q', revision])
1380
1380
1381 if revision not in rev2branch:
1381 if revision not in rev2branch:
1382 rawcheckout()
1382 rawcheckout()
1383 return
1383 return
1384 branches = rev2branch[revision]
1384 branches = rev2branch[revision]
1385 firstlocalbranch = None
1385 firstlocalbranch = None
1386 for b in branches:
1386 for b in branches:
1387 if b == 'refs/heads/master':
1387 if b == 'refs/heads/master':
1388 # master trumps all other branches
1388 # master trumps all other branches
1389 checkout(['refs/heads/master'])
1389 checkout(['refs/heads/master'])
1390 return
1390 return
1391 if not firstlocalbranch and not b.startswith('refs/remotes/'):
1391 if not firstlocalbranch and not b.startswith('refs/remotes/'):
1392 firstlocalbranch = b
1392 firstlocalbranch = b
1393 if firstlocalbranch:
1393 if firstlocalbranch:
1394 checkout([firstlocalbranch])
1394 checkout([firstlocalbranch])
1395 return
1395 return
1396
1396
1397 tracking = self._gittracking(branch2rev.keys())
1397 tracking = self._gittracking(branch2rev.keys())
1398 # choose a remote branch already tracked if possible
1398 # choose a remote branch already tracked if possible
1399 remote = branches[0]
1399 remote = branches[0]
1400 if remote not in tracking:
1400 if remote not in tracking:
1401 for b in branches:
1401 for b in branches:
1402 if b in tracking:
1402 if b in tracking:
1403 remote = b
1403 remote = b
1404 break
1404 break
1405
1405
1406 if remote not in tracking:
1406 if remote not in tracking:
1407 # create a new local tracking branch
1407 # create a new local tracking branch
1408 local = remote.split('/', 3)[3]
1408 local = remote.split('/', 3)[3]
1409 checkout(['-b', local, remote])
1409 checkout(['-b', local, remote])
1410 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
1410 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
1411 # When updating to a tracked remote branch,
1411 # When updating to a tracked remote branch,
1412 # if the local tracking branch is downstream of it,
1412 # if the local tracking branch is downstream of it,
1413 # a normal `git pull` would have performed a "fast-forward merge"
1413 # a normal `git pull` would have performed a "fast-forward merge"
1414 # which is equivalent to updating the local branch to the remote.
1414 # which is equivalent to updating the local branch to the remote.
1415 # Since we are only looking at branching at update, we need to
1415 # Since we are only looking at branching at update, we need to
1416 # detect this situation and perform this action lazily.
1416 # detect this situation and perform this action lazily.
1417 if tracking[remote] != self._gitcurrentbranch():
1417 if tracking[remote] != self._gitcurrentbranch():
1418 checkout([tracking[remote]])
1418 checkout([tracking[remote]])
1419 self._gitcommand(['merge', '--ff', remote])
1419 self._gitcommand(['merge', '--ff', remote])
1420 _sanitize(self._ui, self._abspath, '.git')
1420 _sanitize(self._ui, self._abspath, '.git')
1421 else:
1421 else:
1422 # a real merge would be required, just checkout the revision
1422 # a real merge would be required, just checkout the revision
1423 rawcheckout()
1423 rawcheckout()
1424
1424
1425 @annotatesubrepoerror
1425 @annotatesubrepoerror
1426 def commit(self, text, user, date):
1426 def commit(self, text, user, date):
1427 if self._gitmissing():
1427 if self._gitmissing():
1428 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1428 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1429 cmd = ['commit', '-a', '-m', text]
1429 cmd = ['commit', '-a', '-m', text]
1430 env = os.environ.copy()
1430 env = os.environ.copy()
1431 if user:
1431 if user:
1432 cmd += ['--author', user]
1432 cmd += ['--author', user]
1433 if date:
1433 if date:
1434 # git's date parser silently ignores when seconds < 1e9
1434 # git's date parser silently ignores when seconds < 1e9
1435 # convert to ISO8601
1435 # convert to ISO8601
1436 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1436 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1437 '%Y-%m-%dT%H:%M:%S %1%2')
1437 '%Y-%m-%dT%H:%M:%S %1%2')
1438 self._gitcommand(cmd, env=env)
1438 self._gitcommand(cmd, env=env)
1439 # make sure commit works otherwise HEAD might not exist under certain
1439 # make sure commit works otherwise HEAD might not exist under certain
1440 # circumstances
1440 # circumstances
1441 return self._gitstate()
1441 return self._gitstate()
1442
1442
1443 @annotatesubrepoerror
1443 @annotatesubrepoerror
1444 def merge(self, state):
1444 def merge(self, state):
1445 source, revision, kind = state
1445 source, revision, kind = state
1446 self._fetch(source, revision)
1446 self._fetch(source, revision)
1447 base = self._gitcommand(['merge-base', revision, self._state[1]])
1447 base = self._gitcommand(['merge-base', revision, self._state[1]])
1448 self._gitupdatestat()
1448 self._gitupdatestat()
1449 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1449 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1450
1450
1451 def mergefunc():
1451 def mergefunc():
1452 if base == revision:
1452 if base == revision:
1453 self.get(state) # fast forward merge
1453 self.get(state) # fast forward merge
1454 elif base != self._state[1]:
1454 elif base != self._state[1]:
1455 self._gitcommand(['merge', '--no-commit', revision])
1455 self._gitcommand(['merge', '--no-commit', revision])
1456 _sanitize(self._ui, self._abspath, '.git')
1456 _sanitize(self._ui, self._abspath, '.git')
1457
1457
1458 if self.dirty():
1458 if self.dirty():
1459 if self._gitstate() != revision:
1459 if self._gitstate() != revision:
1460 dirty = self._gitstate() == self._state[1] or code != 0
1460 dirty = self._gitstate() == self._state[1] or code != 0
1461 if _updateprompt(self._ui, self, dirty,
1461 if _updateprompt(self._ui, self, dirty,
1462 self._state[1][:7], revision[:7]):
1462 self._state[1][:7], revision[:7]):
1463 mergefunc()
1463 mergefunc()
1464 else:
1464 else:
1465 mergefunc()
1465 mergefunc()
1466
1466
1467 @annotatesubrepoerror
1467 @annotatesubrepoerror
1468 def push(self, opts):
1468 def push(self, opts):
1469 force = opts.get('force')
1469 force = opts.get('force')
1470
1470
1471 if not self._state[1]:
1471 if not self._state[1]:
1472 return True
1472 return True
1473 if self._gitmissing():
1473 if self._gitmissing():
1474 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1474 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1475 # if a branch in origin contains the revision, nothing to do
1475 # if a branch in origin contains the revision, nothing to do
1476 branch2rev, rev2branch = self._gitbranchmap()
1476 branch2rev, rev2branch = self._gitbranchmap()
1477 if self._state[1] in rev2branch:
1477 if self._state[1] in rev2branch:
1478 for b in rev2branch[self._state[1]]:
1478 for b in rev2branch[self._state[1]]:
1479 if b.startswith('refs/remotes/origin/'):
1479 if b.startswith('refs/remotes/origin/'):
1480 return True
1480 return True
1481 for b, revision in branch2rev.iteritems():
1481 for b, revision in branch2rev.iteritems():
1482 if b.startswith('refs/remotes/origin/'):
1482 if b.startswith('refs/remotes/origin/'):
1483 if self._gitisancestor(self._state[1], revision):
1483 if self._gitisancestor(self._state[1], revision):
1484 return True
1484 return True
1485 # otherwise, try to push the currently checked out branch
1485 # otherwise, try to push the currently checked out branch
1486 cmd = ['push']
1486 cmd = ['push']
1487 if force:
1487 if force:
1488 cmd.append('--force')
1488 cmd.append('--force')
1489
1489
1490 current = self._gitcurrentbranch()
1490 current = self._gitcurrentbranch()
1491 if current:
1491 if current:
1492 # determine if the current branch is even useful
1492 # determine if the current branch is even useful
1493 if not self._gitisancestor(self._state[1], current):
1493 if not self._gitisancestor(self._state[1], current):
1494 self._ui.warn(_('unrelated git branch checked out '
1494 self._ui.warn(_('unrelated git branch checked out '
1495 'in subrepo %s\n') % self._relpath)
1495 'in subrepo %s\n') % self._relpath)
1496 return False
1496 return False
1497 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1497 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1498 (current.split('/', 2)[2], self._relpath))
1498 (current.split('/', 2)[2], self._relpath))
1499 ret = self._gitdir(cmd + ['origin', current])
1499 ret = self._gitdir(cmd + ['origin', current])
1500 return ret[1] == 0
1500 return ret[1] == 0
1501 else:
1501 else:
1502 self._ui.warn(_('no branch checked out in subrepo %s\n'
1502 self._ui.warn(_('no branch checked out in subrepo %s\n'
1503 'cannot push revision %s\n') %
1503 'cannot push revision %s\n') %
1504 (self._relpath, self._state[1]))
1504 (self._relpath, self._state[1]))
1505 return False
1505 return False
1506
1506
1507 @annotatesubrepoerror
1507 @annotatesubrepoerror
1508 def remove(self):
1508 def remove(self):
1509 if self._gitmissing():
1509 if self._gitmissing():
1510 return
1510 return
1511 if self.dirty():
1511 if self.dirty():
1512 self._ui.warn(_('not removing repo %s because '
1512 self._ui.warn(_('not removing repo %s because '
1513 'it has changes.\n') % self._relpath)
1513 'it has changes.\n') % self._relpath)
1514 return
1514 return
1515 # we can't fully delete the repository as it may contain
1515 # we can't fully delete the repository as it may contain
1516 # local-only history
1516 # local-only history
1517 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1517 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1518 self._gitcommand(['config', 'core.bare', 'true'])
1518 self._gitcommand(['config', 'core.bare', 'true'])
1519 for f in os.listdir(self._abspath):
1519 for f in os.listdir(self._abspath):
1520 if f == '.git':
1520 if f == '.git':
1521 continue
1521 continue
1522 path = os.path.join(self._abspath, f)
1522 path = os.path.join(self._abspath, f)
1523 if os.path.isdir(path) and not os.path.islink(path):
1523 if os.path.isdir(path) and not os.path.islink(path):
1524 shutil.rmtree(path)
1524 shutil.rmtree(path)
1525 else:
1525 else:
1526 os.remove(path)
1526 os.remove(path)
1527
1527
1528 def archive(self, ui, archiver, prefix, match=None):
1528 def archive(self, ui, archiver, prefix, match=None):
1529 total = 0
1529 total = 0
1530 source, revision = self._state
1530 source, revision = self._state
1531 if not revision:
1531 if not revision:
1532 return total
1532 return total
1533 self._fetch(source, revision)
1533 self._fetch(source, revision)
1534
1534
1535 # Parse git's native archive command.
1535 # Parse git's native archive command.
1536 # This should be much faster than manually traversing the trees
1536 # This should be much faster than manually traversing the trees
1537 # and objects with many subprocess calls.
1537 # and objects with many subprocess calls.
1538 tarstream = self._gitcommand(['archive', revision], stream=True)
1538 tarstream = self._gitcommand(['archive', revision], stream=True)
1539 tar = tarfile.open(fileobj=tarstream, mode='r|')
1539 tar = tarfile.open(fileobj=tarstream, mode='r|')
1540 relpath = subrelpath(self)
1540 relpath = subrelpath(self)
1541 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1541 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1542 for i, info in enumerate(tar):
1542 for i, info in enumerate(tar):
1543 if info.isdir():
1543 if info.isdir():
1544 continue
1544 continue
1545 if match and not match(info.name):
1545 if match and not match(info.name):
1546 continue
1546 continue
1547 if info.issym():
1547 if info.issym():
1548 data = info.linkname
1548 data = info.linkname
1549 else:
1549 else:
1550 data = tar.extractfile(info).read()
1550 data = tar.extractfile(info).read()
1551 archiver.addfile(os.path.join(prefix, self._path, info.name),
1551 archiver.addfile(os.path.join(prefix, self._path, info.name),
1552 info.mode, info.issym(), data)
1552 info.mode, info.issym(), data)
1553 total += 1
1553 total += 1
1554 ui.progress(_('archiving (%s)') % relpath, i + 1,
1554 ui.progress(_('archiving (%s)') % relpath, i + 1,
1555 unit=_('files'))
1555 unit=_('files'))
1556 ui.progress(_('archiving (%s)') % relpath, None)
1556 ui.progress(_('archiving (%s)') % relpath, None)
1557 return total
1557 return total
1558
1558
1559
1559
1560 @annotatesubrepoerror
1560 @annotatesubrepoerror
1561 def status(self, rev2, **opts):
1561 def status(self, rev2, **opts):
1562 rev1 = self._state[1]
1562 rev1 = self._state[1]
1563 if self._gitmissing() or not rev1:
1563 if self._gitmissing() or not rev1:
1564 # if the repo is missing, return no results
1564 # if the repo is missing, return no results
1565 return [], [], [], [], [], [], []
1565 return [], [], [], [], [], [], []
1566 modified, added, removed = [], [], []
1566 modified, added, removed = [], [], []
1567 self._gitupdatestat()
1567 self._gitupdatestat()
1568 if rev2:
1568 if rev2:
1569 command = ['diff-tree', rev1, rev2]
1569 command = ['diff-tree', rev1, rev2]
1570 else:
1570 else:
1571 command = ['diff-index', rev1]
1571 command = ['diff-index', rev1]
1572 out = self._gitcommand(command)
1572 out = self._gitcommand(command)
1573 for line in out.split('\n'):
1573 for line in out.split('\n'):
1574 tab = line.find('\t')
1574 tab = line.find('\t')
1575 if tab == -1:
1575 if tab == -1:
1576 continue
1576 continue
1577 status, f = line[tab - 1], line[tab + 1:]
1577 status, f = line[tab - 1], line[tab + 1:]
1578 if status == 'M':
1578 if status == 'M':
1579 modified.append(f)
1579 modified.append(f)
1580 elif status == 'A':
1580 elif status == 'A':
1581 added.append(f)
1581 added.append(f)
1582 elif status == 'D':
1582 elif status == 'D':
1583 removed.append(f)
1583 removed.append(f)
1584
1584
1585 deleted = unknown = ignored = clean = []
1585 deleted = unknown = ignored = clean = []
1586 return modified, added, removed, deleted, unknown, ignored, clean
1586 return scmutil.status(modified, added, removed, deleted,
1587 unknown, ignored, clean)
1587
1588
1588 def shortid(self, revid):
1589 def shortid(self, revid):
1589 return revid[:7]
1590 return revid[:7]
1590
1591
1591 types = {
1592 types = {
1592 'hg': hgsubrepo,
1593 'hg': hgsubrepo,
1593 'svn': svnsubrepo,
1594 'svn': svnsubrepo,
1594 'git': gitsubrepo,
1595 'git': gitsubrepo,
1595 }
1596 }
@@ -1,13 +1,13 b''
1 workingfilectx.date = (1000, 0)
1 workingfilectx.date = (1000, 0)
2 ASCII : Gr?ezi!
2 ASCII : Gr?ezi!
3 Latin-1 : Gr�ezi!
3 Latin-1 : Gr�ezi!
4 UTF-8 : Grüezi!
4 UTF-8 : Grüezi!
5 (['foo'], [], [], [], [], [], [])
5 <status modified=['foo'], added=[], removed=[], deleted=[], unknown=[], ignored=[], clean=[]>
6 diff --git a/foo b/foo
6 diff --git a/foo b/foo
7
7
8 --- a/foo
8 --- a/foo
9 +++ b/foo
9 +++ b/foo
10 @@ -1,1 +1,2 @@
10 @@ -1,1 +1,2 @@
11 foo
11 foo
12 +bar
12 +bar
13
13
General Comments 0
You need to be logged in to leave comments. Login now