merge: don't use unknown()...
Matt Mackall
r16094:0776a6ca default
@@ -1,961 +1,961 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 node, archival, error, merge
15 node, archival, error, merge
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
25 def installnormalfilesmatchfn(manifest):
25 def installnormalfilesmatchfn(manifest):
26 '''overrides scmutil.match so that the matcher it returns will ignore all
26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 largefiles'''
27 largefiles'''
28 oldmatch = None # for the closure
28 oldmatch = None # for the closure
29 def override_match(ctx, pats=[], opts={}, globbed=False,
29 def override_match(ctx, pats=[], opts={}, globbed=False,
30 default='relpath'):
30 default='relpath'):
31 match = oldmatch(ctx, pats, opts, globbed, default)
31 match = oldmatch(ctx, pats, opts, globbed, default)
32 m = copy.copy(match)
32 m = copy.copy(match)
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 manifest)
34 manifest)
35 m._files = filter(notlfile, m._files)
35 m._files = filter(notlfile, m._files)
36 m._fmap = set(m._files)
36 m._fmap = set(m._files)
37 orig_matchfn = m.matchfn
37 orig_matchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
39 return m
39 return m
40 oldmatch = installmatchfn(override_match)
40 oldmatch = installmatchfn(override_match)
41
41
42 def installmatchfn(f):
42 def installmatchfn(f):
43 oldmatch = scmutil.match
43 oldmatch = scmutil.match
44 setattr(f, 'oldmatch', oldmatch)
44 setattr(f, 'oldmatch', oldmatch)
45 scmutil.match = f
45 scmutil.match = f
46 return oldmatch
46 return oldmatch
47
47
48 def restorematchfn():
48 def restorematchfn():
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 was called. no-op if scmutil.match is its original function.
50 was called. no-op if scmutil.match is its original function.
51
51
52 Note that n calls to installnormalfilesmatchfn will require n calls to
52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 restore matchfn to reverse'''
53 restore matchfn to reverse'''
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55
55
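The install/restore pair above nests: each installed matcher remembers the function it replaced in its oldmatch attribute, which is why n installs need n restores, as the docstring says. A minimal standalone sketch of that mechanism (FakeScmutil is a stand-in object used only for illustration, not the real scmutil module):

    class FakeScmutil(object):
        pass

    scm = FakeScmutil()
    scm.match = lambda *args: 'original'

    def install(f):
        setattr(f, 'oldmatch', scm.match)  # remember what is being replaced
        scm.match = f
        return f.oldmatch

    def restore():
        scm.match = getattr(scm.match, 'oldmatch', scm.match)

    install(lambda *args: 'wrapper one')
    install(lambda *args: 'wrapper two')   # nested install
    restore()                              # unwinds to 'wrapper one'
    restore()                              # unwinds to the original
    assert scm.match() == 'original'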
56 def add_largefiles(ui, repo, *pats, **opts):
56 def add_largefiles(ui, repo, *pats, **opts):
57 large = opts.pop('large', None)
57 large = opts.pop('large', None)
58 lfsize = lfutil.getminsize(
58 lfsize = lfutil.getminsize(
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60
60
61 lfmatcher = None
61 lfmatcher = None
62 if lfutil.islfilesrepo(repo):
62 if lfutil.islfilesrepo(repo):
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 if lfpats:
64 if lfpats:
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66
66
67 lfnames = []
67 lfnames = []
68 m = scmutil.match(repo[None], pats, opts)
68 m = scmutil.match(repo[None], pats, opts)
69 m.bad = lambda x, y: None
69 m.bad = lambda x, y: None
70 wctx = repo[None]
70 wctx = repo[None]
71 for f in repo.walk(m):
71 for f in repo.walk(m):
72 exact = m.exact(f)
72 exact = m.exact(f)
73 lfile = lfutil.standin(f) in wctx
73 lfile = lfutil.standin(f) in wctx
74 nfile = f in wctx
74 nfile = f in wctx
75 exists = lfile or nfile
75 exists = lfile or nfile
76
76
77 # Don't warn the user when they attempt to add a normal tracked file.
77 # Don't warn the user when they attempt to add a normal tracked file.
78 # The normal add code will do that for us.
78 # The normal add code will do that for us.
79 if exact and exists:
79 if exact and exists:
80 if lfile:
80 if lfile:
81 ui.warn(_('%s already a largefile\n') % f)
81 ui.warn(_('%s already a largefile\n') % f)
82 continue
82 continue
83
83
84 if exact or not exists:
84 if exact or not exists:
85 abovemin = (lfsize and
85 abovemin = (lfsize and
86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87 if large or abovemin or (lfmatcher and lfmatcher(f)):
87 if large or abovemin or (lfmatcher and lfmatcher(f)):
88 lfnames.append(f)
88 lfnames.append(f)
89 if ui.verbose or not exact:
89 if ui.verbose or not exact:
90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
91
91
92 bad = []
92 bad = []
93 standins = []
93 standins = []
94
94
95 # Need to lock, otherwise there could be a race condition between
95 # Need to lock, otherwise there could be a race condition between
96 # when standins are created and added to the repo.
96 # when standins are created and added to the repo.
97 wlock = repo.wlock()
97 wlock = repo.wlock()
98 try:
98 try:
99 if not opts.get('dry_run'):
99 if not opts.get('dry_run'):
100 lfdirstate = lfutil.openlfdirstate(ui, repo)
100 lfdirstate = lfutil.openlfdirstate(ui, repo)
101 for f in lfnames:
101 for f in lfnames:
102 standinname = lfutil.standin(f)
102 standinname = lfutil.standin(f)
103 lfutil.writestandin(repo, standinname, hash='',
103 lfutil.writestandin(repo, standinname, hash='',
104 executable=lfutil.getexecutable(repo.wjoin(f)))
104 executable=lfutil.getexecutable(repo.wjoin(f)))
105 standins.append(standinname)
105 standins.append(standinname)
106 if lfdirstate[f] == 'r':
106 if lfdirstate[f] == 'r':
107 lfdirstate.normallookup(f)
107 lfdirstate.normallookup(f)
108 else:
108 else:
109 lfdirstate.add(f)
109 lfdirstate.add(f)
110 lfdirstate.write()
110 lfdirstate.write()
111 bad += [lfutil.splitstandin(f)
111 bad += [lfutil.splitstandin(f)
112 for f in lfutil.repo_add(repo, standins)
112 for f in lfutil.repo_add(repo, standins)
113 if f in m.files()]
113 if f in m.files()]
114 finally:
114 finally:
115 wlock.release()
115 wlock.release()
116 return bad
116 return bad
117
117
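add_largefiles, like most of the code in this file, relies on lfutil.standin, lfutil.isstandin and lfutil.splitstandin to map between a largefile and the small standin file that Mercurial actually tracks under the .hglf directory. The following simplified re-implementations (not the real lfutil helpers) show roughly what that mapping looks like:

    shortname = '.hglf'  # directory holding the standins

    def standin(filename):
        # largefile path -> standin path tracked by Mercurial
        return shortname + '/' + filename

    def isstandin(filename):
        return filename.startswith(shortname + '/')

    def splitstandin(filename):
        # standin path -> largefile path, or None if not a standin
        if isstandin(filename):
            return filename[len(shortname) + 1:]
        return None

    assert standin('data/big.bin') == '.hglf/data/big.bin'
    assert splitstandin('.hglf/data/big.bin') == 'data/big.bin'
    assert splitstandin('README') is None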
118 def remove_largefiles(ui, repo, *pats, **opts):
118 def remove_largefiles(ui, repo, *pats, **opts):
119 after = opts.get('after')
119 after = opts.get('after')
120 if not pats and not after:
120 if not pats and not after:
121 raise util.Abort(_('no files specified'))
121 raise util.Abort(_('no files specified'))
122 m = scmutil.match(repo[None], pats, opts)
122 m = scmutil.match(repo[None], pats, opts)
123 try:
123 try:
124 repo.lfstatus = True
124 repo.lfstatus = True
125 s = repo.status(match=m, clean=True)
125 s = repo.status(match=m, clean=True)
126 finally:
126 finally:
127 repo.lfstatus = False
127 repo.lfstatus = False
128 manifest = repo[None].manifest()
128 manifest = repo[None].manifest()
129 modified, added, deleted, clean = [[f for f in list
129 modified, added, deleted, clean = [[f for f in list
130 if lfutil.standin(f) in manifest]
130 if lfutil.standin(f) in manifest]
131 for list in [s[0], s[1], s[3], s[6]]]
131 for list in [s[0], s[1], s[3], s[6]]]
132
132
133 def warn(files, reason):
133 def warn(files, reason):
134 for f in files:
134 for f in files:
135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
136 % (m.rel(f), reason))
136 % (m.rel(f), reason))
137
137
138 if after:
138 if after:
139 remove, forget = deleted, []
139 remove, forget = deleted, []
140 warn(modified + added + clean, _('file still exists'))
140 warn(modified + added + clean, _('file still exists'))
141 else:
141 else:
142 remove, forget = deleted + clean, []
142 remove, forget = deleted + clean, []
143 warn(modified, _('file is modified'))
143 warn(modified, _('file is modified'))
144 warn(added, _('file has been marked for add'))
144 warn(added, _('file has been marked for add'))
145
145
146 for f in sorted(remove + forget):
146 for f in sorted(remove + forget):
147 if ui.verbose or not m.exact(f):
147 if ui.verbose or not m.exact(f):
148 ui.status(_('removing %s\n') % m.rel(f))
148 ui.status(_('removing %s\n') % m.rel(f))
149
149
150 # Need to lock because standin files are deleted then removed from the
150 # Need to lock because standin files are deleted then removed from the
151 # repository and we could race inbetween.
151 # repository and we could race inbetween.
152 wlock = repo.wlock()
152 wlock = repo.wlock()
153 try:
153 try:
154 lfdirstate = lfutil.openlfdirstate(ui, repo)
154 lfdirstate = lfutil.openlfdirstate(ui, repo)
155 for f in remove:
155 for f in remove:
156 if not after:
156 if not after:
157 # If this is being called by addremove, notify the user that we
157 # If this is being called by addremove, notify the user that we
158 # are removing the file.
158 # are removing the file.
159 if getattr(repo, "_isaddremove", False):
159 if getattr(repo, "_isaddremove", False):
160 ui.status(_('removing %s\n' % f))
160 ui.status(_('removing %s\n' % f))
161 if os.path.exists(repo.wjoin(f)):
161 if os.path.exists(repo.wjoin(f)):
162 util.unlinkpath(repo.wjoin(f))
162 util.unlinkpath(repo.wjoin(f))
163 lfdirstate.remove(f)
163 lfdirstate.remove(f)
164 lfdirstate.write()
164 lfdirstate.write()
165 forget = [lfutil.standin(f) for f in forget]
165 forget = [lfutil.standin(f) for f in forget]
166 remove = [lfutil.standin(f) for f in remove]
166 remove = [lfutil.standin(f) for f in remove]
167 lfutil.repo_forget(repo, forget)
167 lfutil.repo_forget(repo, forget)
168 # If this is being called by addremove, let the original addremove
168 # If this is being called by addremove, let the original addremove
169 # function handle this.
169 # function handle this.
170 if not getattr(repo, "_isaddremove", False):
170 if not getattr(repo, "_isaddremove", False):
171 lfutil.repo_remove(repo, remove, unlink=True)
171 lfutil.repo_remove(repo, remove, unlink=True)
172 finally:
172 finally:
173 wlock.release()
173 wlock.release()
174
174
175 # -- Wrappers: modify existing commands --------------------------------
175 # -- Wrappers: modify existing commands --------------------------------
176
176
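None of the wrappers below is called directly; the extension's setup code (which lives outside this file) hooks them into Mercurial through the extensions module. The sketch below shows roughly how such wiring looks; the module path 'overrides' and the flag descriptions are illustrative, not copied from the actual largefiles uisetup:

    from mercurial import extensions, commands, merge, filemerge
    import overrides  # illustrative import of this module

    def uisetup(ui):
        # wrap user-facing commands and extend their option tables
        entry = extensions.wrapcommand(commands.table, 'add', overrides.override_add)
        entry[1].extend([('', 'large', None, 'add as largefile'),
                         ('', 'normal', None, 'add as normal file')])
        extensions.wrapcommand(commands.table, 'remove', overrides.override_remove)
        # wrap the internal merge machinery used while updating/merging
        extensions.wrapfunction(merge, '_checkunknownfile',
                                overrides.override_checkunknownfile)
        extensions.wrapfunction(merge, 'manifestmerge',
                                overrides.override_manifestmerge)
        extensions.wrapfunction(filemerge, 'filemerge',
                                overrides.override_filemerge)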
177 # Add works by going through the files that the user wanted to add and
177 # Add works by going through the files that the user wanted to add and
178 # checking if they should be added as largefiles. Then it makes a new
178 # checking if they should be added as largefiles. Then it makes a new
179 # matcher which matches only the normal files and runs the original
179 # matcher which matches only the normal files and runs the original
180 # version of add.
180 # version of add.
181 def override_add(orig, ui, repo, *pats, **opts):
181 def override_add(orig, ui, repo, *pats, **opts):
182 normal = opts.pop('normal')
182 normal = opts.pop('normal')
183 if normal:
183 if normal:
184 if opts.get('large'):
184 if opts.get('large'):
185 raise util.Abort(_('--normal cannot be used with --large'))
185 raise util.Abort(_('--normal cannot be used with --large'))
186 return orig(ui, repo, *pats, **opts)
186 return orig(ui, repo, *pats, **opts)
187 bad = add_largefiles(ui, repo, *pats, **opts)
187 bad = add_largefiles(ui, repo, *pats, **opts)
188 installnormalfilesmatchfn(repo[None].manifest())
188 installnormalfilesmatchfn(repo[None].manifest())
189 result = orig(ui, repo, *pats, **opts)
189 result = orig(ui, repo, *pats, **opts)
190 restorematchfn()
190 restorematchfn()
191
191
192 return (result == 1 or bad) and 1 or 0
192 return (result == 1 or bad) and 1 or 0
193
193
194 def override_remove(orig, ui, repo, *pats, **opts):
194 def override_remove(orig, ui, repo, *pats, **opts):
195 installnormalfilesmatchfn(repo[None].manifest())
195 installnormalfilesmatchfn(repo[None].manifest())
196 orig(ui, repo, *pats, **opts)
196 orig(ui, repo, *pats, **opts)
197 restorematchfn()
197 restorematchfn()
198 remove_largefiles(ui, repo, *pats, **opts)
198 remove_largefiles(ui, repo, *pats, **opts)
199
199
200 def override_status(orig, ui, repo, *pats, **opts):
200 def override_status(orig, ui, repo, *pats, **opts):
201 try:
201 try:
202 repo.lfstatus = True
202 repo.lfstatus = True
203 return orig(ui, repo, *pats, **opts)
203 return orig(ui, repo, *pats, **opts)
204 finally:
204 finally:
205 repo.lfstatus = False
205 repo.lfstatus = False
206
206
207 def override_log(orig, ui, repo, *pats, **opts):
207 def override_log(orig, ui, repo, *pats, **opts):
208 try:
208 try:
209 repo.lfstatus = True
209 repo.lfstatus = True
210 orig(ui, repo, *pats, **opts)
210 orig(ui, repo, *pats, **opts)
211 finally:
211 finally:
212 repo.lfstatus = False
212 repo.lfstatus = False
213
213
214 def override_verify(orig, ui, repo, *pats, **opts):
214 def override_verify(orig, ui, repo, *pats, **opts):
215 large = opts.pop('large', False)
215 large = opts.pop('large', False)
216 all = opts.pop('lfa', False)
216 all = opts.pop('lfa', False)
217 contents = opts.pop('lfc', False)
217 contents = opts.pop('lfc', False)
218
218
219 result = orig(ui, repo, *pats, **opts)
219 result = orig(ui, repo, *pats, **opts)
220 if large:
220 if large:
221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
222 return result
222 return result
223
223
224 # Override needs to refresh standins so that update's normal merge
224 # Override needs to refresh standins so that update's normal merge
225 # will go through properly. Then the other update hook (overriding repo.update)
225 # will go through properly. Then the other update hook (overriding repo.update)
226 # will get the new files. Filemerge is also overriden so that the merge
226 # will get the new files. Filemerge is also overriden so that the merge
227 # will merge standins correctly.
227 # will merge standins correctly.
228 def override_update(orig, ui, repo, *pats, **opts):
228 def override_update(orig, ui, repo, *pats, **opts):
229 lfdirstate = lfutil.openlfdirstate(ui, repo)
229 lfdirstate = lfutil.openlfdirstate(ui, repo)
230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
231 False, False)
231 False, False)
232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
233
233
234 # Need to lock between the standins getting updated and their
234 # Need to lock between the standins getting updated and their
235 # largefiles getting updated
235 # largefiles getting updated
236 wlock = repo.wlock()
236 wlock = repo.wlock()
237 try:
237 try:
238 if opts['check']:
238 if opts['check']:
239 mod = len(modified) > 0
239 mod = len(modified) > 0
240 for lfile in unsure:
240 for lfile in unsure:
241 standin = lfutil.standin(lfile)
241 standin = lfutil.standin(lfile)
242 if repo['.'][standin].data().strip() != \
242 if repo['.'][standin].data().strip() != \
243 lfutil.hashfile(repo.wjoin(lfile)):
243 lfutil.hashfile(repo.wjoin(lfile)):
244 mod = True
244 mod = True
245 else:
245 else:
246 lfdirstate.normal(lfile)
246 lfdirstate.normal(lfile)
247 lfdirstate.write()
247 lfdirstate.write()
248 if mod:
248 if mod:
249 raise util.Abort(_('uncommitted local changes'))
249 raise util.Abort(_('uncommitted local changes'))
250 # XXX handle removed differently
250 # XXX handle removed differently
251 if not opts['clean']:
251 if not opts['clean']:
252 for lfile in unsure + modified + added:
252 for lfile in unsure + modified + added:
253 lfutil.updatestandin(repo, lfutil.standin(lfile))
253 lfutil.updatestandin(repo, lfutil.standin(lfile))
254 finally:
254 finally:
255 wlock.release()
255 wlock.release()
256 return orig(ui, repo, *pats, **opts)
256 return orig(ui, repo, *pats, **opts)
257
257
258 # Before starting the manifest merge, merge.updates will call
258 # Before starting the manifest merge, merge.updates will call
259 # _checkunknown to check if there are any files in the merged-in
259 # _checkunknown to check if there are any files in the merged-in
260 # changeset that collide with unknown files in the working copy.
260 # changeset that collide with unknown files in the working copy.
261 #
261 #
262 # The largefiles are seen as unknown, so this prevents us from merging
262 # The largefiles are seen as unknown, so this prevents us from merging
263 # in a file 'foo' if we already have a largefile with the same name.
263 # in a file 'foo' if we already have a largefile with the same name.
264 #
264 #
265 # The overridden function filters the unknown files by removing any
265 # The overridden function filters the unknown files by removing any
266 # largefiles. This makes the merge proceed and we can then handle this
266 # largefiles. This makes the merge proceed and we can then handle this
267 # case further in the overridden manifestmerge function below.
267 # case further in the overridden manifestmerge function below.
268 def override_checkunknownfile(origfn, repo, wctx, mctx, f):
268 def override_checkunknownfile(origfn, repo, wctx, mctx, f):
269 if lfutil.standin(f) in wctx:
269 if lfutil.standin(f) in wctx:
270 return False
270 return False
271 return origfn(repo, wctx, mctx, f)
271 return origfn(repo, wctx, mctx, f)
272
272
273 # The manifest merge handles conflicts on the manifest level. We want
273 # The manifest merge handles conflicts on the manifest level. We want
274 # to handle changes in largefile-ness of files at this level too.
274 # to handle changes in largefile-ness of files at this level too.
275 #
275 #
276 # The strategy is to run the original manifestmerge and then process
276 # The strategy is to run the original manifestmerge and then process
277 # the action list it outputs. There are two cases we need to deal with:
277 # the action list it outputs. There are two cases we need to deal with:
278 #
278 #
279 # 1. Normal file in p1, largefile in p2. Here the largefile is
279 # 1. Normal file in p1, largefile in p2. Here the largefile is
280 # detected via its standin file, which will enter the working copy
280 # detected via its standin file, which will enter the working copy
281 # with a "get" action. It is not "merge" since the standin is all
281 # with a "get" action. It is not "merge" since the standin is all
282 # Mercurial is concerned with at this level -- the link to the
282 # Mercurial is concerned with at this level -- the link to the
283 # existing normal file is not relevant here.
283 # existing normal file is not relevant here.
284 #
284 #
285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
286 # since the largefile will be present in the working copy and
286 # since the largefile will be present in the working copy and
287 # different from the normal file in p2. Mercurial therefore
287 # different from the normal file in p2. Mercurial therefore
288 # triggers a merge action.
288 # triggers a merge action.
289 #
289 #
290 # In both cases, we prompt the user and emit new actions to either
290 # In both cases, we prompt the user and emit new actions to either
291 # remove the standin (if the normal file was kept) or to remove the
291 # remove the standin (if the normal file was kept) or to remove the
292 # normal file and get the standin (if the largefile was kept). The
292 # normal file and get the standin (if the largefile was kept). The
293 # default prompt answer is to use the largefile version since it was
293 # default prompt answer is to use the largefile version since it was
294 # presumably changed on purpose.
294 # presumably changed on purpose.
295 #
295 #
296 # Finally, the merge.applyupdates function will then take care of
296 # Finally, the merge.applyupdates function will then take care of
297 # writing the files into the working copy and lfcommands.updatelfiles
297 # writing the files into the working copy and lfcommands.updatelfiles
298 # will update the largefiles.
298 # will update the largefiles.
299 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
299 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
300 actions = origfn(repo, p1, p2, pa, overwrite, partial)
300 actions = origfn(repo, p1, p2, pa, overwrite, partial)
301 processed = []
301 processed = []
302
302
303 for action in actions:
303 for action in actions:
304 if overwrite:
304 if overwrite:
305 processed.append(action)
305 processed.append(action)
306 continue
306 continue
307 f, m = action[:2]
307 f, m = action[:2]
308
308
309 choices = (_('&Largefile'), _('&Normal file'))
309 choices = (_('&Largefile'), _('&Normal file'))
310 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
310 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
311 # Case 1: normal file in the working copy, largefile in
311 # Case 1: normal file in the working copy, largefile in
312 # the second parent
312 # the second parent
313 lfile = lfutil.splitstandin(f)
313 lfile = lfutil.splitstandin(f)
314 standin = f
314 standin = f
315 msg = _('%s has been turned into a largefile\n'
315 msg = _('%s has been turned into a largefile\n'
316 'use (l)argefile or keep as (n)ormal file?') % lfile
316 'use (l)argefile or keep as (n)ormal file?') % lfile
317 if repo.ui.promptchoice(msg, choices, 0) == 0:
317 if repo.ui.promptchoice(msg, choices, 0) == 0:
318 processed.append((lfile, "r"))
318 processed.append((lfile, "r"))
319 processed.append((standin, "g", p2.flags(standin)))
319 processed.append((standin, "g", p2.flags(standin)))
320 else:
320 else:
321 processed.append((standin, "r"))
321 processed.append((standin, "r"))
322 -  elif m == "m" and lfutil.standin(f) in p1 and f in p2:
322 +  elif m == "g" and lfutil.standin(f) in p1 and f in p2:
323 # Case 2: largefile in the working copy, normal file in
323 # Case 2: largefile in the working copy, normal file in
324 # the second parent
324 # the second parent
325 standin = lfutil.standin(f)
325 standin = lfutil.standin(f)
326 lfile = f
326 lfile = f
327 msg = _('%s has been turned into a normal file\n'
327 msg = _('%s has been turned into a normal file\n'
328 'keep as (l)argefile or use (n)ormal file?') % lfile
328 'keep as (l)argefile or use (n)ormal file?') % lfile
329 if repo.ui.promptchoice(msg, choices, 0) == 0:
329 if repo.ui.promptchoice(msg, choices, 0) == 0:
330 processed.append((lfile, "r"))
330 processed.append((lfile, "r"))
331 else:
331 else:
332 processed.append((standin, "r"))
332 processed.append((standin, "r"))
333 processed.append((lfile, "g", p2.flags(lfile)))
333 processed.append((lfile, "g", p2.flags(lfile)))
334 else:
334 else:
335 processed.append(action)
335 processed.append(action)
336
336
337 return processed
337 return processed
338
338
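For a concrete picture of the rewrite above, take case 1: a file that is normal in the working copy has been turned into a largefile in the other parent, so its standin arrives with a "g" (get) action. If the user answers the prompt with (l)argefile, the action list changes roughly as follows (file names and flag values are made up for illustration):

    # actions as emitted by the original manifestmerge: (file, action, ...)
    actions = [('.hglf/big.dat', 'g', ''),   # standin enters with a "get"
               ('README', 'g', '')]

    # after override_manifestmerge, with the largefile kept at the prompt
    processed = [('big.dat', 'r'),            # remove the colliding normal file
                 ('.hglf/big.dat', 'g', ''),  # still fetch the standin
                 ('README', 'g', '')]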
339 # Override filemerge to prompt the user about how they wish to merge
339 # Override filemerge to prompt the user about how they wish to merge
340 # largefiles. This will handle identical edits, and copy/rename +
340 # largefiles. This will handle identical edits, and copy/rename +
341 # edit without prompting the user.
341 # edit without prompting the user.
342 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
342 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
343 # Use better variable names here. Because this is a wrapper we cannot
343 # Use better variable names here. Because this is a wrapper we cannot
344 # change the variable names in the function declaration.
344 # change the variable names in the function declaration.
345 fcdest, fcother, fcancestor = fcd, fco, fca
345 fcdest, fcother, fcancestor = fcd, fco, fca
346 if not lfutil.isstandin(orig):
346 if not lfutil.isstandin(orig):
347 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
347 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
348 else:
348 else:
349 if not fcother.cmp(fcdest): # files identical?
349 if not fcother.cmp(fcdest): # files identical?
350 return None
350 return None
351
351
352 # backwards, use working dir parent as ancestor
352 # backwards, use working dir parent as ancestor
353 if fcancestor == fcother:
353 if fcancestor == fcother:
354 fcancestor = fcdest.parents()[0]
354 fcancestor = fcdest.parents()[0]
355
355
356 if orig != fcother.path():
356 if orig != fcother.path():
357 repo.ui.status(_('merging %s and %s to %s\n')
357 repo.ui.status(_('merging %s and %s to %s\n')
358 % (lfutil.splitstandin(orig),
358 % (lfutil.splitstandin(orig),
359 lfutil.splitstandin(fcother.path()),
359 lfutil.splitstandin(fcother.path()),
360 lfutil.splitstandin(fcdest.path())))
360 lfutil.splitstandin(fcdest.path())))
361 else:
361 else:
362 repo.ui.status(_('merging %s\n')
362 repo.ui.status(_('merging %s\n')
363 % lfutil.splitstandin(fcdest.path()))
363 % lfutil.splitstandin(fcdest.path()))
364
364
365 if fcancestor.path() != fcother.path() and fcother.data() == \
365 if fcancestor.path() != fcother.path() and fcother.data() == \
366 fcancestor.data():
366 fcancestor.data():
367 return 0
367 return 0
368 if fcancestor.path() != fcdest.path() and fcdest.data() == \
368 if fcancestor.path() != fcdest.path() and fcdest.data() == \
369 fcancestor.data():
369 fcancestor.data():
370 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
370 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
371 return 0
371 return 0
372
372
373 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
373 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
374 'keep (l)ocal or take (o)ther?') %
374 'keep (l)ocal or take (o)ther?') %
375 lfutil.splitstandin(orig),
375 lfutil.splitstandin(orig),
376 (_('&Local'), _('&Other')), 0) == 0:
376 (_('&Local'), _('&Other')), 0) == 0:
377 return 0
377 return 0
378 else:
378 else:
379 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
379 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
380 return 0
380 return 0
381
381
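Both prompts above depend on ui.promptchoice returning the zero-based index of the chosen option, with the last argument selecting the default; '== 0' therefore means the first choice ((l)ocal here) was taken. A toy stand-in that only mimics that convention, not Mercurial's ui:

    def promptchoice(msg, choices, default=0):
        # non-interactive toy: always pick the default choice
        return default

    if promptchoice('keep (l)ocal or take (o)ther?',
                    ('&Local', '&Other'), 0) == 0:
        print('keeping the local largefile')
    else:
        print('taking the other version')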
382 # Copy first changes the matchers to match standins instead of
382 # Copy first changes the matchers to match standins instead of
383 # largefiles. Then it overrides util.copyfile in that function it
383 # largefiles. Then it overrides util.copyfile in that function it
384 # checks if the destination largefile already exists. It also keeps a
384 # checks if the destination largefile already exists. It also keeps a
385 # list of copied files so that the largefiles can be copied and the
385 # list of copied files so that the largefiles can be copied and the
386 # dirstate updated.
386 # dirstate updated.
387 def override_copy(orig, ui, repo, pats, opts, rename=False):
387 def override_copy(orig, ui, repo, pats, opts, rename=False):
388 # doesn't remove largefile on rename
388 # doesn't remove largefile on rename
389 if len(pats) < 2:
389 if len(pats) < 2:
390 # this isn't legal, let the original function deal with it
390 # this isn't legal, let the original function deal with it
391 return orig(ui, repo, pats, opts, rename)
391 return orig(ui, repo, pats, opts, rename)
392
392
393 def makestandin(relpath):
393 def makestandin(relpath):
394 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
394 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
395 return os.path.join(repo.wjoin(lfutil.standin(path)))
395 return os.path.join(repo.wjoin(lfutil.standin(path)))
396
396
397 fullpats = scmutil.expandpats(pats)
397 fullpats = scmutil.expandpats(pats)
398 dest = fullpats[-1]
398 dest = fullpats[-1]
399
399
400 if os.path.isdir(dest):
400 if os.path.isdir(dest):
401 if not os.path.isdir(makestandin(dest)):
401 if not os.path.isdir(makestandin(dest)):
402 os.makedirs(makestandin(dest))
402 os.makedirs(makestandin(dest))
403 # This could copy both lfiles and normal files in one command,
403 # This could copy both lfiles and normal files in one command,
404 # but we don't want to do that. First replace their matcher to
404 # but we don't want to do that. First replace their matcher to
405 # only match normal files and run it, then replace it to just
405 # only match normal files and run it, then replace it to just
406 # match largefiles and run it again.
406 # match largefiles and run it again.
407 nonormalfiles = False
407 nonormalfiles = False
408 nolfiles = False
408 nolfiles = False
409 try:
409 try:
410 try:
410 try:
411 installnormalfilesmatchfn(repo[None].manifest())
411 installnormalfilesmatchfn(repo[None].manifest())
412 result = orig(ui, repo, pats, opts, rename)
412 result = orig(ui, repo, pats, opts, rename)
413 except util.Abort, e:
413 except util.Abort, e:
414 if str(e) != 'no files to copy':
414 if str(e) != 'no files to copy':
415 raise e
415 raise e
416 else:
416 else:
417 nonormalfiles = True
417 nonormalfiles = True
418 result = 0
418 result = 0
419 finally:
419 finally:
420 restorematchfn()
420 restorematchfn()
421
421
422 # The first rename can cause our current working directory to be removed.
422 # The first rename can cause our current working directory to be removed.
423 # In that case there is nothing left to copy/rename so just quit.
423 # In that case there is nothing left to copy/rename so just quit.
424 try:
424 try:
425 repo.getcwd()
425 repo.getcwd()
426 except OSError:
426 except OSError:
427 return result
427 return result
428
428
429 try:
429 try:
430 try:
430 try:
431 # When we call orig below it creates the standins but we don't add them
431 # When we call orig below it creates the standins but we don't add them
432 # to the dir state until later so lock during that time.
432 # to the dir state until later so lock during that time.
433 wlock = repo.wlock()
433 wlock = repo.wlock()
434
434
435 manifest = repo[None].manifest()
435 manifest = repo[None].manifest()
436 oldmatch = None # for the closure
436 oldmatch = None # for the closure
437 def override_match(ctx, pats=[], opts={}, globbed=False,
437 def override_match(ctx, pats=[], opts={}, globbed=False,
438 default='relpath'):
438 default='relpath'):
439 newpats = []
439 newpats = []
440 # The patterns were previously mangled to add the standin
440 # The patterns were previously mangled to add the standin
441 # directory; we need to remove that now
441 # directory; we need to remove that now
442 for pat in pats:
442 for pat in pats:
443 if match_.patkind(pat) is None and lfutil.shortname in pat:
443 if match_.patkind(pat) is None and lfutil.shortname in pat:
444 newpats.append(pat.replace(lfutil.shortname, ''))
444 newpats.append(pat.replace(lfutil.shortname, ''))
445 else:
445 else:
446 newpats.append(pat)
446 newpats.append(pat)
447 match = oldmatch(ctx, newpats, opts, globbed, default)
447 match = oldmatch(ctx, newpats, opts, globbed, default)
448 m = copy.copy(match)
448 m = copy.copy(match)
449 lfile = lambda f: lfutil.standin(f) in manifest
449 lfile = lambda f: lfutil.standin(f) in manifest
450 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
450 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
451 m._fmap = set(m._files)
451 m._fmap = set(m._files)
452 orig_matchfn = m.matchfn
452 orig_matchfn = m.matchfn
453 m.matchfn = lambda f: (lfutil.isstandin(f) and
453 m.matchfn = lambda f: (lfutil.isstandin(f) and
454 (f in manifest) and
454 (f in manifest) and
455 orig_matchfn(lfutil.splitstandin(f)) or
455 orig_matchfn(lfutil.splitstandin(f)) or
456 None)
456 None)
457 return m
457 return m
458 oldmatch = installmatchfn(override_match)
458 oldmatch = installmatchfn(override_match)
459 listpats = []
459 listpats = []
460 for pat in pats:
460 for pat in pats:
461 if match_.patkind(pat) is not None:
461 if match_.patkind(pat) is not None:
462 listpats.append(pat)
462 listpats.append(pat)
463 else:
463 else:
464 listpats.append(makestandin(pat))
464 listpats.append(makestandin(pat))
465
465
466 try:
466 try:
467 origcopyfile = util.copyfile
467 origcopyfile = util.copyfile
468 copiedfiles = []
468 copiedfiles = []
469 def override_copyfile(src, dest):
469 def override_copyfile(src, dest):
470 if (lfutil.shortname in src and
470 if (lfutil.shortname in src and
471 dest.startswith(repo.wjoin(lfutil.shortname))):
471 dest.startswith(repo.wjoin(lfutil.shortname))):
472 destlfile = dest.replace(lfutil.shortname, '')
472 destlfile = dest.replace(lfutil.shortname, '')
473 if not opts['force'] and os.path.exists(destlfile):
473 if not opts['force'] and os.path.exists(destlfile):
474 raise IOError('',
474 raise IOError('',
475 _('destination largefile already exists'))
475 _('destination largefile already exists'))
476 copiedfiles.append((src, dest))
476 copiedfiles.append((src, dest))
477 origcopyfile(src, dest)
477 origcopyfile(src, dest)
478
478
479 util.copyfile = override_copyfile
479 util.copyfile = override_copyfile
480 result += orig(ui, repo, listpats, opts, rename)
480 result += orig(ui, repo, listpats, opts, rename)
481 finally:
481 finally:
482 util.copyfile = origcopyfile
482 util.copyfile = origcopyfile
483
483
484 lfdirstate = lfutil.openlfdirstate(ui, repo)
484 lfdirstate = lfutil.openlfdirstate(ui, repo)
485 for (src, dest) in copiedfiles:
485 for (src, dest) in copiedfiles:
486 if (lfutil.shortname in src and
486 if (lfutil.shortname in src and
487 dest.startswith(repo.wjoin(lfutil.shortname))):
487 dest.startswith(repo.wjoin(lfutil.shortname))):
488 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
488 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
489 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
489 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
490 destlfiledir = os.path.dirname(destlfile) or '.'
490 destlfiledir = os.path.dirname(destlfile) or '.'
491 if not os.path.isdir(destlfiledir):
491 if not os.path.isdir(destlfiledir):
492 os.makedirs(destlfiledir)
492 os.makedirs(destlfiledir)
493 if rename:
493 if rename:
494 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
494 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
495 lfdirstate.remove(srclfile)
495 lfdirstate.remove(srclfile)
496 else:
496 else:
497 util.copyfile(srclfile, destlfile)
497 util.copyfile(srclfile, destlfile)
498 lfdirstate.add(destlfile)
498 lfdirstate.add(destlfile)
499 lfdirstate.write()
499 lfdirstate.write()
500 except util.Abort, e:
500 except util.Abort, e:
501 if str(e) != 'no files to copy':
501 if str(e) != 'no files to copy':
502 raise e
502 raise e
503 else:
503 else:
504 nolfiles = True
504 nolfiles = True
505 finally:
505 finally:
506 restorematchfn()
506 restorematchfn()
507 wlock.release()
507 wlock.release()
508
508
509 if nolfiles and nonormalfiles:
509 if nolfiles and nonormalfiles:
510 raise util.Abort(_('no files to copy'))
510 raise util.Abort(_('no files to copy'))
511
511
512 return result
512 return result
513
513
514 # When the user calls revert, we have to be careful to not revert any
514 # When the user calls revert, we have to be careful to not revert any
515 # changes to other largefiles accidentally. This means we have to keep
515 # changes to other largefiles accidentally. This means we have to keep
516 # track of the largefiles that are being reverted so we only pull down
516 # track of the largefiles that are being reverted so we only pull down
517 # the necessary largefiles.
517 # the necessary largefiles.
518 #
518 #
519 # Standins are only updated (to match the hash of largefiles) before
519 # Standins are only updated (to match the hash of largefiles) before
520 # commits. Update the standins then run the original revert, changing
520 # commits. Update the standins then run the original revert, changing
521 # the matcher to hit standins instead of largefiles. Based on the
521 # the matcher to hit standins instead of largefiles. Based on the
522 # resulting standins update the largefiles. Then return the standins
522 # resulting standins update the largefiles. Then return the standins
523 # to their proper state
523 # to their proper state
524 def override_revert(orig, ui, repo, *pats, **opts):
524 def override_revert(orig, ui, repo, *pats, **opts):
525 # Because we put the standins in a bad state (by updating them)
525 # Because we put the standins in a bad state (by updating them)
526 # and then return them to a correct state we need to lock to
526 # and then return them to a correct state we need to lock to
527 # prevent others from changing them in their incorrect state.
527 # prevent others from changing them in their incorrect state.
528 wlock = repo.wlock()
528 wlock = repo.wlock()
529 try:
529 try:
530 lfdirstate = lfutil.openlfdirstate(ui, repo)
530 lfdirstate = lfutil.openlfdirstate(ui, repo)
531 (modified, added, removed, missing, unknown, ignored, clean) = \
531 (modified, added, removed, missing, unknown, ignored, clean) = \
532 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
532 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
533 for lfile in modified:
533 for lfile in modified:
534 lfutil.updatestandin(repo, lfutil.standin(lfile))
534 lfutil.updatestandin(repo, lfutil.standin(lfile))
535 for lfile in missing:
535 for lfile in missing:
536 os.unlink(repo.wjoin(lfutil.standin(lfile)))
536 os.unlink(repo.wjoin(lfutil.standin(lfile)))
537
537
538 try:
538 try:
539 ctx = repo[opts.get('rev')]
539 ctx = repo[opts.get('rev')]
540 oldmatch = None # for the closure
540 oldmatch = None # for the closure
541 def override_match(ctx, pats=[], opts={}, globbed=False,
541 def override_match(ctx, pats=[], opts={}, globbed=False,
542 default='relpath'):
542 default='relpath'):
543 match = oldmatch(ctx, pats, opts, globbed, default)
543 match = oldmatch(ctx, pats, opts, globbed, default)
544 m = copy.copy(match)
544 m = copy.copy(match)
545 def tostandin(f):
545 def tostandin(f):
546 if lfutil.standin(f) in ctx:
546 if lfutil.standin(f) in ctx:
547 return lfutil.standin(f)
547 return lfutil.standin(f)
548 elif lfutil.standin(f) in repo[None]:
548 elif lfutil.standin(f) in repo[None]:
549 return None
549 return None
550 return f
550 return f
551 m._files = [tostandin(f) for f in m._files]
551 m._files = [tostandin(f) for f in m._files]
552 m._files = [f for f in m._files if f is not None]
552 m._files = [f for f in m._files if f is not None]
553 m._fmap = set(m._files)
553 m._fmap = set(m._files)
554 orig_matchfn = m.matchfn
554 orig_matchfn = m.matchfn
555 def matchfn(f):
555 def matchfn(f):
556 if lfutil.isstandin(f):
556 if lfutil.isstandin(f):
557 # We need to keep track of what largefiles are being
557 # We need to keep track of what largefiles are being
558 # matched so we know which ones to update later --
558 # matched so we know which ones to update later --
559 # otherwise we accidentally revert changes to other
559 # otherwise we accidentally revert changes to other
560 # largefiles. This is repo-specific, so duckpunch the
560 # largefiles. This is repo-specific, so duckpunch the
561 # repo object to keep the list of largefiles for us
561 # repo object to keep the list of largefiles for us
562 # later.
562 # later.
563 if orig_matchfn(lfutil.splitstandin(f)) and \
563 if orig_matchfn(lfutil.splitstandin(f)) and \
564 (f in repo[None] or f in ctx):
564 (f in repo[None] or f in ctx):
565 lfileslist = getattr(repo, '_lfilestoupdate', [])
565 lfileslist = getattr(repo, '_lfilestoupdate', [])
566 lfileslist.append(lfutil.splitstandin(f))
566 lfileslist.append(lfutil.splitstandin(f))
567 repo._lfilestoupdate = lfileslist
567 repo._lfilestoupdate = lfileslist
568 return True
568 return True
569 else:
569 else:
570 return False
570 return False
571 return orig_matchfn(f)
571 return orig_matchfn(f)
572 m.matchfn = matchfn
572 m.matchfn = matchfn
573 return m
573 return m
574 oldmatch = installmatchfn(override_match)
574 oldmatch = installmatchfn(override_match)
575 scmutil.match
575 scmutil.match
576 matches = override_match(repo[None], pats, opts)
576 matches = override_match(repo[None], pats, opts)
577 orig(ui, repo, *pats, **opts)
577 orig(ui, repo, *pats, **opts)
578 finally:
578 finally:
579 restorematchfn()
579 restorematchfn()
580 lfileslist = getattr(repo, '_lfilestoupdate', [])
580 lfileslist = getattr(repo, '_lfilestoupdate', [])
581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
582 printmessage=False)
582 printmessage=False)
583
583
584 # empty out the largefiles list so we start fresh next time
584 # empty out the largefiles list so we start fresh next time
585 repo._lfilestoupdate = []
585 repo._lfilestoupdate = []
586 for lfile in modified:
586 for lfile in modified:
587 if lfile in lfileslist:
587 if lfile in lfileslist:
588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
589 in repo['.']:
589 in repo['.']:
590 lfutil.writestandin(repo, lfutil.standin(lfile),
590 lfutil.writestandin(repo, lfutil.standin(lfile),
591 repo['.'][lfile].data().strip(),
591 repo['.'][lfile].data().strip(),
592 'x' in repo['.'][lfile].flags())
592 'x' in repo['.'][lfile].flags())
593 lfdirstate = lfutil.openlfdirstate(ui, repo)
593 lfdirstate = lfutil.openlfdirstate(ui, repo)
594 for lfile in added:
594 for lfile in added:
595 standin = lfutil.standin(lfile)
595 standin = lfutil.standin(lfile)
596 if standin not in ctx and (standin in matches or opts.get('all')):
596 if standin not in ctx and (standin in matches or opts.get('all')):
597 if lfile in lfdirstate:
597 if lfile in lfdirstate:
598 lfdirstate.drop(lfile)
598 lfdirstate.drop(lfile)
599 util.unlinkpath(repo.wjoin(standin))
599 util.unlinkpath(repo.wjoin(standin))
600 lfdirstate.write()
600 lfdirstate.write()
601 finally:
601 finally:
602 wlock.release()
602 wlock.release()
603
603
604 def hg_update(orig, repo, node):
604 def hg_update(orig, repo, node):
605 result = orig(repo, node)
605 result = orig(repo, node)
606 lfcommands.updatelfiles(repo.ui, repo)
606 lfcommands.updatelfiles(repo.ui, repo)
607 return result
607 return result
608
608
609 def hg_clean(orig, repo, node, show_stats=True):
609 def hg_clean(orig, repo, node, show_stats=True):
610 result = orig(repo, node, show_stats)
610 result = orig(repo, node, show_stats)
611 lfcommands.updatelfiles(repo.ui, repo)
611 lfcommands.updatelfiles(repo.ui, repo)
612 return result
612 return result
613
613
614 def hg_merge(orig, repo, node, force=None, remind=True):
614 def hg_merge(orig, repo, node, force=None, remind=True):
615 # Mark the repo as being in the middle of a merge, so that
615 # Mark the repo as being in the middle of a merge, so that
616 # updatelfiles() will know that it needs to trust the standins in
616 # updatelfiles() will know that it needs to trust the standins in
617 # the working copy, not in the standins in the current node
617 # the working copy, not in the standins in the current node
618 repo._ismerging = True
618 repo._ismerging = True
619 try:
619 try:
620 result = orig(repo, node, force, remind)
620 result = orig(repo, node, force, remind)
621 lfcommands.updatelfiles(repo.ui, repo)
621 lfcommands.updatelfiles(repo.ui, repo)
622 finally:
622 finally:
623 repo._ismerging = False
623 repo._ismerging = False
624 return result
624 return result
625
625
626 # When we rebase a repository with remotely changed largefiles, we need to
626 # When we rebase a repository with remotely changed largefiles, we need to
627 # take some extra care so that the largefiles are correctly updated in the
627 # take some extra care so that the largefiles are correctly updated in the
628 # working copy
628 # working copy
629 def override_pull(orig, ui, repo, source=None, **opts):
629 def override_pull(orig, ui, repo, source=None, **opts):
630 if opts.get('rebase', False):
630 if opts.get('rebase', False):
631 repo._isrebasing = True
631 repo._isrebasing = True
632 try:
632 try:
633 if opts.get('update'):
633 if opts.get('update'):
634 del opts['update']
634 del opts['update']
635 ui.debug('--update and --rebase are not compatible, ignoring '
635 ui.debug('--update and --rebase are not compatible, ignoring '
636 'the update flag\n')
636 'the update flag\n')
637 del opts['rebase']
637 del opts['rebase']
638 cmdutil.bailifchanged(repo)
638 cmdutil.bailifchanged(repo)
639 revsprepull = len(repo)
639 revsprepull = len(repo)
640 origpostincoming = commands.postincoming
640 origpostincoming = commands.postincoming
641 def _dummy(*args, **kwargs):
641 def _dummy(*args, **kwargs):
642 pass
642 pass
643 commands.postincoming = _dummy
643 commands.postincoming = _dummy
644 repo.lfpullsource = source
644 repo.lfpullsource = source
645 if not source:
645 if not source:
646 source = 'default'
646 source = 'default'
647 try:
647 try:
648 result = commands.pull(ui, repo, source, **opts)
648 result = commands.pull(ui, repo, source, **opts)
649 finally:
649 finally:
650 commands.postincoming = origpostincoming
650 commands.postincoming = origpostincoming
651 revspostpull = len(repo)
651 revspostpull = len(repo)
652 if revspostpull > revsprepull:
652 if revspostpull > revsprepull:
653 result = result or rebase.rebase(ui, repo)
653 result = result or rebase.rebase(ui, repo)
654 finally:
654 finally:
655 repo._isrebasing = False
655 repo._isrebasing = False
656 else:
656 else:
657 repo.lfpullsource = source
657 repo.lfpullsource = source
658 if not source:
658 if not source:
659 source = 'default'
659 source = 'default'
660 result = orig(ui, repo, source, **opts)
660 result = orig(ui, repo, source, **opts)
661 # If we do not have the new largefiles for any new heads we pulled, we
661 # If we do not have the new largefiles for any new heads we pulled, we
662 # will run into a problem later if we try to merge or rebase with one of
662 # will run into a problem later if we try to merge or rebase with one of
663 # these heads, so cache the largefiles now direclty into the system
663 # these heads, so cache the largefiles now direclty into the system
664 # cache.
664 # cache.
665 ui.status(_("caching new largefiles\n"))
665 ui.status(_("caching new largefiles\n"))
666 numcached = 0
666 numcached = 0
667 branches = repo.branchmap()
667 branches = repo.branchmap()
668 for branch in branches:
668 for branch in branches:
669 heads = repo.branchheads(branch)
669 heads = repo.branchheads(branch)
670 for head in heads:
670 for head in heads:
671 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
671 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
672 numcached += len(cached)
672 numcached += len(cached)
673 ui.status(_("%d largefiles cached\n" % numcached))
673 ui.status(_("%d largefiles cached\n" % numcached))
674 return result
674 return result
675
675
676 def override_rebase(orig, ui, repo, **opts):
676 def override_rebase(orig, ui, repo, **opts):
677 repo._isrebasing = True
677 repo._isrebasing = True
678 try:
678 try:
679 orig(ui, repo, **opts)
679 orig(ui, repo, **opts)
680 finally:
680 finally:
681 repo._isrebasing = False
681 repo._isrebasing = False
682
682
683 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
683 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
684 prefix=None, mtime=None, subrepos=None):
684 prefix=None, mtime=None, subrepos=None):
685 # No need to lock because we are only reading history and
685 # No need to lock because we are only reading history and
686 # largefile caches, neither of which are modified.
686 # largefile caches, neither of which are modified.
687 lfcommands.cachelfiles(repo.ui, repo, node)
687 lfcommands.cachelfiles(repo.ui, repo, node)
688
688
689 if kind not in archival.archivers:
689 if kind not in archival.archivers:
690 raise util.Abort(_("unknown archive type '%s'") % kind)
690 raise util.Abort(_("unknown archive type '%s'") % kind)
691
691
692 ctx = repo[node]
692 ctx = repo[node]
693
693
694 if kind == 'files':
694 if kind == 'files':
695 if prefix:
695 if prefix:
696 raise util.Abort(
696 raise util.Abort(
697 _('cannot give prefix when archiving to files'))
697 _('cannot give prefix when archiving to files'))
698 else:
698 else:
699 prefix = archival.tidyprefix(dest, kind, prefix)
699 prefix = archival.tidyprefix(dest, kind, prefix)
700
700
701 def write(name, mode, islink, getdata):
701 def write(name, mode, islink, getdata):
702 if matchfn and not matchfn(name):
702 if matchfn and not matchfn(name):
703 return
703 return
704 data = getdata()
704 data = getdata()
705 if decode:
705 if decode:
706 data = repo.wwritedata(name, data)
706 data = repo.wwritedata(name, data)
707 archiver.addfile(prefix + name, mode, islink, data)
707 archiver.addfile(prefix + name, mode, islink, data)
708
708
709 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
709 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
710
710
711 if repo.ui.configbool("ui", "archivemeta", True):
711 if repo.ui.configbool("ui", "archivemeta", True):
712 def metadata():
712 def metadata():
713 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
713 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
714 hex(repo.changelog.node(0)), hex(node), ctx.branch())
714 hex(repo.changelog.node(0)), hex(node), ctx.branch())
715
715
716 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
716 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
717 if repo.tagtype(t) == 'global')
717 if repo.tagtype(t) == 'global')
718 if not tags:
718 if not tags:
719 repo.ui.pushbuffer()
719 repo.ui.pushbuffer()
720 opts = {'template': '{latesttag}\n{latesttagdistance}',
720 opts = {'template': '{latesttag}\n{latesttagdistance}',
721 'style': '', 'patch': None, 'git': None}
721 'style': '', 'patch': None, 'git': None}
722 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
722 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
723 ltags, dist = repo.ui.popbuffer().split('\n')
723 ltags, dist = repo.ui.popbuffer().split('\n')
724 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
724 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
725 tags += 'latesttagdistance: %s\n' % dist
725 tags += 'latesttagdistance: %s\n' % dist
726
726
727 return base + tags
727 return base + tags
728
728
729 write('.hg_archival.txt', 0644, False, metadata)
729 write('.hg_archival.txt', 0644, False, metadata)
730
730
731 for f in ctx:
731 for f in ctx:
732 ff = ctx.flags(f)
732 ff = ctx.flags(f)
733 getdata = ctx[f].data
733 getdata = ctx[f].data
734 if lfutil.isstandin(f):
734 if lfutil.isstandin(f):
735 path = lfutil.findfile(repo, getdata().strip())
735 path = lfutil.findfile(repo, getdata().strip())
736 if path is None:
736 if path is None:
737 raise util.Abort(
737 raise util.Abort(
738 _('largefile %s not found in repo store or system cache')
738 _('largefile %s not found in repo store or system cache')
739 % lfutil.splitstandin(f))
739 % lfutil.splitstandin(f))
740 f = lfutil.splitstandin(f)
740 f = lfutil.splitstandin(f)
741
741
742 def getdatafn():
742 def getdatafn():
743 fd = None
743 fd = None
744 try:
744 try:
745 fd = open(path, 'rb')
745 fd = open(path, 'rb')
746 return fd.read()
746 return fd.read()
747 finally:
747 finally:
748 if fd:
748 if fd:
749 fd.close()
749 fd.close()
750
750
751 getdata = getdatafn
751 getdata = getdatafn
752 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
752 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
753
753
754 if subrepos:
754 if subrepos:
755 for subpath in ctx.substate:
755 for subpath in ctx.substate:
756 sub = ctx.sub(subpath)
756 sub = ctx.sub(subpath)
757 sub.archive(repo.ui, archiver, prefix)
757 sub.archive(repo.ui, archiver, prefix)
758
758
759 archiver.done()
759 archiver.done()
760
760
761 # If a largefile is modified, the change is not reflected in its
761 # If a largefile is modified, the change is not reflected in its
762 # standin until a commit. cmdutil.bailifchanged() raises an exception
762 # standin until a commit. cmdutil.bailifchanged() raises an exception
763 # if the repo has uncommitted changes. Wrap it to also check if
763 # if the repo has uncommitted changes. Wrap it to also check if
764 # largefiles were changed. This is used by bisect and backout.
764 # largefiles were changed. This is used by bisect and backout.
765 def override_bailifchanged(orig, repo):
765 def override_bailifchanged(orig, repo):
766 orig(repo)
766 orig(repo)
767 repo.lfstatus = True
767 repo.lfstatus = True
768 modified, added, removed, deleted = repo.status()[:4]
768 modified, added, removed, deleted = repo.status()[:4]
769 repo.lfstatus = False
769 repo.lfstatus = False
770 if modified or added or removed or deleted:
770 if modified or added or removed or deleted:
771 raise util.Abort(_('outstanding uncommitted changes'))
771 raise util.Abort(_('outstanding uncommitted changes'))
772
772
773 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
773 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
774 def override_fetch(orig, ui, repo, *pats, **opts):
774 def override_fetch(orig, ui, repo, *pats, **opts):
775 repo.lfstatus = True
775 repo.lfstatus = True
776 modified, added, removed, deleted = repo.status()[:4]
776 modified, added, removed, deleted = repo.status()[:4]
777 repo.lfstatus = False
777 repo.lfstatus = False
778 if modified or added or removed or deleted:
778 if modified or added or removed or deleted:
779 raise util.Abort(_('outstanding uncommitted changes'))
779 raise util.Abort(_('outstanding uncommitted changes'))
780 return orig(ui, repo, *pats, **opts)
780 return orig(ui, repo, *pats, **opts)
781
781
782 def override_forget(orig, ui, repo, *pats, **opts):
782 def override_forget(orig, ui, repo, *pats, **opts):
783 installnormalfilesmatchfn(repo[None].manifest())
783 installnormalfilesmatchfn(repo[None].manifest())
784 orig(ui, repo, *pats, **opts)
784 orig(ui, repo, *pats, **opts)
785 restorematchfn()
785 restorematchfn()
786 m = scmutil.match(repo[None], pats, opts)
786 m = scmutil.match(repo[None], pats, opts)
787
787
788 try:
788 try:
789 repo.lfstatus = True
789 repo.lfstatus = True
790 s = repo.status(match=m, clean=True)
790 s = repo.status(match=m, clean=True)
791 finally:
791 finally:
792 repo.lfstatus = False
792 repo.lfstatus = False
793 forget = sorted(s[0] + s[1] + s[3] + s[6])
793 forget = sorted(s[0] + s[1] + s[3] + s[6])
794 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
794 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
795
795
796 for f in forget:
796 for f in forget:
797 if lfutil.standin(f) not in repo.dirstate and not \
797 if lfutil.standin(f) not in repo.dirstate and not \
798 os.path.isdir(m.rel(lfutil.standin(f))):
798 os.path.isdir(m.rel(lfutil.standin(f))):
799 ui.warn(_('not removing %s: file is already untracked\n')
799 ui.warn(_('not removing %s: file is already untracked\n')
800 % m.rel(f))
800 % m.rel(f))
801
801
802 for f in forget:
802 for f in forget:
803 if ui.verbose or not m.exact(f):
803 if ui.verbose or not m.exact(f):
804 ui.status(_('removing %s\n') % m.rel(f))
804 ui.status(_('removing %s\n') % m.rel(f))
805
805
806 # Need to lock because standin files are deleted then removed from the
806 # Need to lock because standin files are deleted then removed from the
807 # repository and we could race inbetween.
807 # repository and we could race inbetween.
808 wlock = repo.wlock()
808 wlock = repo.wlock()
809 try:
809 try:
810 lfdirstate = lfutil.openlfdirstate(ui, repo)
810 lfdirstate = lfutil.openlfdirstate(ui, repo)
811 for f in forget:
811 for f in forget:
812 if lfdirstate[f] == 'a':
812 if lfdirstate[f] == 'a':
813 lfdirstate.drop(f)
813 lfdirstate.drop(f)
814 else:
814 else:
815 lfdirstate.remove(f)
815 lfdirstate.remove(f)
816 lfdirstate.write()
816 lfdirstate.write()
817 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
817 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
818 unlink=True)
818 unlink=True)
819 finally:
819 finally:
820 wlock.release()
820 wlock.release()
821
821
822 def getoutgoinglfiles(ui, repo, dest=None, **opts):
822 def getoutgoinglfiles(ui, repo, dest=None, **opts):
823 dest = ui.expandpath(dest or 'default-push', dest or 'default')
823 dest = ui.expandpath(dest or 'default-push', dest or 'default')
824 dest, branches = hg.parseurl(dest, opts.get('branch'))
824 dest, branches = hg.parseurl(dest, opts.get('branch'))
825 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
825 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
826 if revs:
826 if revs:
827 revs = [repo.lookup(rev) for rev in revs]
827 revs = [repo.lookup(rev) for rev in revs]
828
828
829 remoteui = hg.remoteui
829 remoteui = hg.remoteui
830
830
831 try:
831 try:
832 remote = hg.repository(remoteui(repo, opts), dest)
832 remote = hg.repository(remoteui(repo, opts), dest)
833 except error.RepoError:
833 except error.RepoError:
834 return None
834 return None
835 o = lfutil.findoutgoing(repo, remote, False)
835 o = lfutil.findoutgoing(repo, remote, False)
836 if not o:
836 if not o:
837 return None
837 return None
838 o = repo.changelog.nodesbetween(o, revs)[0]
838 o = repo.changelog.nodesbetween(o, revs)[0]
839 if opts.get('newest_first'):
839 if opts.get('newest_first'):
840 o.reverse()
840 o.reverse()
841
841
842 toupload = set()
842 toupload = set()
843 for n in o:
843 for n in o:
844 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
844 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
845 ctx = repo[n]
845 ctx = repo[n]
846 files = set(ctx.files())
846 files = set(ctx.files())
847 if len(parents) == 2:
847 if len(parents) == 2:
848 mc = ctx.manifest()
848 mc = ctx.manifest()
849 mp1 = ctx.parents()[0].manifest()
849 mp1 = ctx.parents()[0].manifest()
850 mp2 = ctx.parents()[1].manifest()
850 mp2 = ctx.parents()[1].manifest()
851 for f in mp1:
851 for f in mp1:
852 if f not in mc:
852 if f not in mc:
853 files.add(f)
853 files.add(f)
854 for f in mp2:
854 for f in mp2:
855 if f not in mc:
855 if f not in mc:
856 files.add(f)
856 files.add(f)
857 for f in mc:
857 for f in mc:
858 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
858 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
859 files.add(f)
859 files.add(f)
860 toupload = toupload.union(
860 toupload = toupload.union(
861 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
861 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
862 return toupload
862 return toupload
863
863
864 def override_outgoing(orig, ui, repo, dest=None, **opts):
864 def override_outgoing(orig, ui, repo, dest=None, **opts):
865 orig(ui, repo, dest, **opts)
865 orig(ui, repo, dest, **opts)
866
866
867 if opts.pop('large', None):
867 if opts.pop('large', None):
868 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
868 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
869 if toupload is None:
869 if toupload is None:
870 ui.status(_('largefiles: No remote repo\n'))
870 ui.status(_('largefiles: No remote repo\n'))
871 else:
871 else:
872 ui.status(_('largefiles to upload:\n'))
872 ui.status(_('largefiles to upload:\n'))
873 for file in toupload:
873 for file in toupload:
874 ui.status(lfutil.splitstandin(file) + '\n')
874 ui.status(lfutil.splitstandin(file) + '\n')
875 ui.status('\n')
875 ui.status('\n')
876
876
877 def override_summary(orig, ui, repo, *pats, **opts):
877 def override_summary(orig, ui, repo, *pats, **opts):
878 try:
878 try:
879 repo.lfstatus = True
879 repo.lfstatus = True
880 orig(ui, repo, *pats, **opts)
880 orig(ui, repo, *pats, **opts)
881 finally:
881 finally:
882 repo.lfstatus = False
882 repo.lfstatus = False
883
883
884 if opts.pop('large', None):
884 if opts.pop('large', None):
885 toupload = getoutgoinglfiles(ui, repo, None, **opts)
885 toupload = getoutgoinglfiles(ui, repo, None, **opts)
886 if toupload is None:
886 if toupload is None:
887 ui.status(_('largefiles: No remote repo\n'))
887 ui.status(_('largefiles: No remote repo\n'))
888 else:
888 else:
889 ui.status(_('largefiles: %d to upload\n') % len(toupload))
889 ui.status(_('largefiles: %d to upload\n') % len(toupload))
890
890
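Assuming the extension wires a --large flag onto these commands elsewhere (the flag registration is not shown in this hunk), 'hg outgoing --large' would append the 'largefiles to upload:' listing produced above, and 'hg summary --large' the one-line 'largefiles: N to upload' count.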
891 def override_addremove(orig, ui, repo, *pats, **opts):
891 def override_addremove(orig, ui, repo, *pats, **opts):
892 # Get the list of missing largefiles so we can remove them
892 # Get the list of missing largefiles so we can remove them
893 lfdirstate = lfutil.openlfdirstate(ui, repo)
893 lfdirstate = lfutil.openlfdirstate(ui, repo)
894 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
894 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
895 False, False)
895 False, False)
896 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
896 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
897
897
898 # Call into the normal remove code, but leave the removal of the standin
898 # Call into the normal remove code, but leave the removal of the standin
899 # to the original addremove. Monkey patching here makes sure
899 # to the original addremove. Monkey patching here makes sure
900 # we don't remove the standin in the largefiles code, preventing a very
900 # we don't remove the standin in the largefiles code, preventing a very
901 # confused state later.
901 # confused state later.
902 if missing:
902 if missing:
903 repo._isaddremove = True
903 repo._isaddremove = True
904 remove_largefiles(ui, repo, *missing, **opts)
904 remove_largefiles(ui, repo, *missing, **opts)
905 repo._isaddremove = False
905 repo._isaddremove = False
906 # Call into the normal add code, and any files that *should* be added as
906 # Call into the normal add code, and any files that *should* be added as
907 # largefiles will be
907 # largefiles will be
908 add_largefiles(ui, repo, *pats, **opts)
908 add_largefiles(ui, repo, *pats, **opts)
909 # Now that we've handled largefiles, hand off to the original addremove
909 # Now that we've handled largefiles, hand off to the original addremove
910 # function to take care of the rest. Make sure it doesn't do anything with
910 # function to take care of the rest. Make sure it doesn't do anything with
911 # largefiles by installing a matcher that will ignore them.
911 # largefiles by installing a matcher that will ignore them.
912 installnormalfilesmatchfn(repo[None].manifest())
912 installnormalfilesmatchfn(repo[None].manifest())
913 result = orig(ui, repo, *pats, **opts)
913 result = orig(ui, repo, *pats, **opts)
914 restorematchfn()
914 restorematchfn()
915 return result
915 return result
916
916
917 # Calling purge with --all will cause the largefiles to be deleted.
917 # Calling purge with --all will cause the largefiles to be deleted.
918 # Override repo.status to prevent this from happening.
918 # Override repo.status to prevent this from happening.
919 def override_purge(orig, ui, repo, *dirs, **opts):
919 def override_purge(orig, ui, repo, *dirs, **opts):
920 oldstatus = repo.status
920 oldstatus = repo.status
921 def override_status(node1='.', node2=None, match=None, ignored=False,
921 def override_status(node1='.', node2=None, match=None, ignored=False,
922 clean=False, unknown=False, listsubrepos=False):
922 clean=False, unknown=False, listsubrepos=False):
923 r = oldstatus(node1, node2, match, ignored, clean, unknown,
923 r = oldstatus(node1, node2, match, ignored, clean, unknown,
924 listsubrepos)
924 listsubrepos)
925 lfdirstate = lfutil.openlfdirstate(ui, repo)
925 lfdirstate = lfutil.openlfdirstate(ui, repo)
926 modified, added, removed, deleted, unknown, ignored, clean = r
926 modified, added, removed, deleted, unknown, ignored, clean = r
927 unknown = [f for f in unknown if lfdirstate[f] == '?']
927 unknown = [f for f in unknown if lfdirstate[f] == '?']
928 ignored = [f for f in ignored if lfdirstate[f] == '?']
928 ignored = [f for f in ignored if lfdirstate[f] == '?']
929 return modified, added, removed, deleted, unknown, ignored, clean
929 return modified, added, removed, deleted, unknown, ignored, clean
930 repo.status = override_status
930 repo.status = override_status
931 orig(ui, repo, *dirs, **opts)
931 orig(ui, repo, *dirs, **opts)
932 repo.status = oldstatus
932 repo.status = oldstatus
933
933
934 def override_rollback(orig, ui, repo, **opts):
934 def override_rollback(orig, ui, repo, **opts):
935 result = orig(ui, repo, **opts)
935 result = orig(ui, repo, **opts)
936 merge.update(repo, node=None, branchmerge=False, force=True,
936 merge.update(repo, node=None, branchmerge=False, force=True,
937 partial=lfutil.isstandin)
937 partial=lfutil.isstandin)
938 wlock = repo.wlock()
938 wlock = repo.wlock()
939 try:
939 try:
940 lfdirstate = lfutil.openlfdirstate(ui, repo)
940 lfdirstate = lfutil.openlfdirstate(ui, repo)
941 lfiles = lfutil.listlfiles(repo)
941 lfiles = lfutil.listlfiles(repo)
942 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
942 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
943 for file in lfiles:
943 for file in lfiles:
944 if file in oldlfiles:
944 if file in oldlfiles:
945 lfdirstate.normallookup(file)
945 lfdirstate.normallookup(file)
946 else:
946 else:
947 lfdirstate.add(file)
947 lfdirstate.add(file)
948 lfdirstate.write()
948 lfdirstate.write()
949 finally:
949 finally:
950 wlock.release()
950 wlock.release()
951 return result
951 return result
952
952
953 def override_transplant(orig, ui, repo, *revs, **opts):
953 def override_transplant(orig, ui, repo, *revs, **opts):
954 try:
954 try:
955 repo._istransplanting = True
955 repo._istransplanting = True
956 result = orig(ui, repo, *revs, **opts)
956 result = orig(ui, repo, *revs, **opts)
957 lfcommands.updatelfiles(ui, repo, filelist=None,
957 lfcommands.updatelfiles(ui, repo, filelist=None,
958 printmessage=False)
958 printmessage=False)
959 finally:
959 finally:
960 repo._istransplanting = False
960 repo._istransplanting = False
961 return result
961 return result
@@ -1,1176 +1,1172 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15
15
16 class changectx(object):
16 class changectx(object):
17 """A changecontext object makes access to data related to a particular
17 """A changecontext object makes access to data related to a particular
18 changeset convenient."""
18 changeset convenient."""
19 def __init__(self, repo, changeid=''):
19 def __init__(self, repo, changeid=''):
20 """changeid is a revision number, node, or tag"""
20 """changeid is a revision number, node, or tag"""
21 if changeid == '':
21 if changeid == '':
22 changeid = '.'
22 changeid = '.'
23 self._repo = repo
23 self._repo = repo
24 if isinstance(changeid, (long, int)):
24 if isinstance(changeid, (long, int)):
25 self._rev = changeid
25 self._rev = changeid
26 self._node = self._repo.changelog.node(changeid)
26 self._node = self._repo.changelog.node(changeid)
27 else:
27 else:
28 self._node = self._repo.lookup(changeid)
28 self._node = self._repo.lookup(changeid)
29 self._rev = self._repo.changelog.rev(self._node)
29 self._rev = self._repo.changelog.rev(self._node)
30
30
31 def __str__(self):
31 def __str__(self):
32 return short(self.node())
32 return short(self.node())
33
33
34 def __int__(self):
34 def __int__(self):
35 return self.rev()
35 return self.rev()
36
36
37 def __repr__(self):
37 def __repr__(self):
38 return "<changectx %s>" % str(self)
38 return "<changectx %s>" % str(self)
39
39
40 def __hash__(self):
40 def __hash__(self):
41 try:
41 try:
42 return hash(self._rev)
42 return hash(self._rev)
43 except AttributeError:
43 except AttributeError:
44 return id(self)
44 return id(self)
45
45
46 def __eq__(self, other):
46 def __eq__(self, other):
47 try:
47 try:
48 return self._rev == other._rev
48 return self._rev == other._rev
49 except AttributeError:
49 except AttributeError:
50 return False
50 return False
51
51
52 def __ne__(self, other):
52 def __ne__(self, other):
53 return not (self == other)
53 return not (self == other)
54
54
55 def __nonzero__(self):
55 def __nonzero__(self):
56 return self._rev != nullrev
56 return self._rev != nullrev
57
57
58 @propertycache
58 @propertycache
59 def _changeset(self):
59 def _changeset(self):
60 return self._repo.changelog.read(self.node())
60 return self._repo.changelog.read(self.node())
61
61
62 @propertycache
62 @propertycache
63 def _manifest(self):
63 def _manifest(self):
64 return self._repo.manifest.read(self._changeset[0])
64 return self._repo.manifest.read(self._changeset[0])
65
65
66 @propertycache
66 @propertycache
67 def _manifestdelta(self):
67 def _manifestdelta(self):
68 return self._repo.manifest.readdelta(self._changeset[0])
68 return self._repo.manifest.readdelta(self._changeset[0])
69
69
70 @propertycache
70 @propertycache
71 def _parents(self):
71 def _parents(self):
72 p = self._repo.changelog.parentrevs(self._rev)
72 p = self._repo.changelog.parentrevs(self._rev)
73 if p[1] == nullrev:
73 if p[1] == nullrev:
74 p = p[:-1]
74 p = p[:-1]
75 return [changectx(self._repo, x) for x in p]
75 return [changectx(self._repo, x) for x in p]
76
76
77 @propertycache
77 @propertycache
78 def substate(self):
78 def substate(self):
79 return subrepo.state(self, self._repo.ui)
79 return subrepo.state(self, self._repo.ui)
80
80
81 def __contains__(self, key):
81 def __contains__(self, key):
82 return key in self._manifest
82 return key in self._manifest
83
83
84 def __getitem__(self, key):
84 def __getitem__(self, key):
85 return self.filectx(key)
85 return self.filectx(key)
86
86
87 def __iter__(self):
87 def __iter__(self):
88 for f in sorted(self._manifest):
88 for f in sorted(self._manifest):
89 yield f
89 yield f
90
90
91 def changeset(self):
91 def changeset(self):
92 return self._changeset
92 return self._changeset
93 def manifest(self):
93 def manifest(self):
94 return self._manifest
94 return self._manifest
95 def manifestnode(self):
95 def manifestnode(self):
96 return self._changeset[0]
96 return self._changeset[0]
97
97
98 def rev(self):
98 def rev(self):
99 return self._rev
99 return self._rev
100 def node(self):
100 def node(self):
101 return self._node
101 return self._node
102 def hex(self):
102 def hex(self):
103 return hex(self._node)
103 return hex(self._node)
104 def user(self):
104 def user(self):
105 return self._changeset[1]
105 return self._changeset[1]
106 def date(self):
106 def date(self):
107 return self._changeset[2]
107 return self._changeset[2]
108 def files(self):
108 def files(self):
109 return self._changeset[3]
109 return self._changeset[3]
110 def description(self):
110 def description(self):
111 return self._changeset[4]
111 return self._changeset[4]
112 def branch(self):
112 def branch(self):
113 return encoding.tolocal(self._changeset[5].get("branch"))
113 return encoding.tolocal(self._changeset[5].get("branch"))
114 def extra(self):
114 def extra(self):
115 return self._changeset[5]
115 return self._changeset[5]
116 def tags(self):
116 def tags(self):
117 return self._repo.nodetags(self._node)
117 return self._repo.nodetags(self._node)
118 def bookmarks(self):
118 def bookmarks(self):
119 return self._repo.nodebookmarks(self._node)
119 return self._repo.nodebookmarks(self._node)
120 def phase(self):
120 def phase(self):
121 if self._rev == -1:
121 if self._rev == -1:
122 return phases.public
122 return phases.public
123 if self._rev >= len(self._repo._phaserev):
123 if self._rev >= len(self._repo._phaserev):
124 # outdated cache
124 # outdated cache
125 del self._repo._phaserev
125 del self._repo._phaserev
126 return self._repo._phaserev[self._rev]
126 return self._repo._phaserev[self._rev]
127 def phasestr(self):
127 def phasestr(self):
128 return phases.phasenames[self.phase()]
128 return phases.phasenames[self.phase()]
129 def mutable(self):
129 def mutable(self):
130 return self._repo._phaserev[self._rev] > phases.public
130 return self._repo._phaserev[self._rev] > phases.public
131 def hidden(self):
131 def hidden(self):
132 return self._rev in self._repo.changelog.hiddenrevs
132 return self._rev in self._repo.changelog.hiddenrevs
133
133
134 def parents(self):
134 def parents(self):
135 """return contexts for each parent changeset"""
135 """return contexts for each parent changeset"""
136 return self._parents
136 return self._parents
137
137
138 def p1(self):
138 def p1(self):
139 return self._parents[0]
139 return self._parents[0]
140
140
141 def p2(self):
141 def p2(self):
142 if len(self._parents) == 2:
142 if len(self._parents) == 2:
143 return self._parents[1]
143 return self._parents[1]
144 return changectx(self._repo, -1)
144 return changectx(self._repo, -1)
145
145
146 def children(self):
146 def children(self):
147 """return contexts for each child changeset"""
147 """return contexts for each child changeset"""
148 c = self._repo.changelog.children(self._node)
148 c = self._repo.changelog.children(self._node)
149 return [changectx(self._repo, x) for x in c]
149 return [changectx(self._repo, x) for x in c]
150
150
151 def ancestors(self):
151 def ancestors(self):
152 for a in self._repo.changelog.ancestors(self._rev):
152 for a in self._repo.changelog.ancestors(self._rev):
153 yield changectx(self._repo, a)
153 yield changectx(self._repo, a)
154
154
155 def descendants(self):
155 def descendants(self):
156 for d in self._repo.changelog.descendants(self._rev):
156 for d in self._repo.changelog.descendants(self._rev):
157 yield changectx(self._repo, d)
157 yield changectx(self._repo, d)
158
158
159 def _fileinfo(self, path):
159 def _fileinfo(self, path):
160 if '_manifest' in self.__dict__:
160 if '_manifest' in self.__dict__:
161 try:
161 try:
162 return self._manifest[path], self._manifest.flags(path)
162 return self._manifest[path], self._manifest.flags(path)
163 except KeyError:
163 except KeyError:
164 raise error.LookupError(self._node, path,
164 raise error.LookupError(self._node, path,
165 _('not found in manifest'))
165 _('not found in manifest'))
166 if '_manifestdelta' in self.__dict__ or path in self.files():
166 if '_manifestdelta' in self.__dict__ or path in self.files():
167 if path in self._manifestdelta:
167 if path in self._manifestdelta:
168 return self._manifestdelta[path], self._manifestdelta.flags(path)
168 return self._manifestdelta[path], self._manifestdelta.flags(path)
169 node, flag = self._repo.manifest.find(self._changeset[0], path)
169 node, flag = self._repo.manifest.find(self._changeset[0], path)
170 if not node:
170 if not node:
171 raise error.LookupError(self._node, path,
171 raise error.LookupError(self._node, path,
172 _('not found in manifest'))
172 _('not found in manifest'))
173
173
174 return node, flag
174 return node, flag
175
175
176 def filenode(self, path):
176 def filenode(self, path):
177 return self._fileinfo(path)[0]
177 return self._fileinfo(path)[0]
178
178
179 def flags(self, path):
179 def flags(self, path):
180 try:
180 try:
181 return self._fileinfo(path)[1]
181 return self._fileinfo(path)[1]
182 except error.LookupError:
182 except error.LookupError:
183 return ''
183 return ''
184
184
185 def filectx(self, path, fileid=None, filelog=None):
185 def filectx(self, path, fileid=None, filelog=None):
186 """get a file context from this changeset"""
186 """get a file context from this changeset"""
187 if fileid is None:
187 if fileid is None:
188 fileid = self.filenode(path)
188 fileid = self.filenode(path)
189 return filectx(self._repo, path, fileid=fileid,
189 return filectx(self._repo, path, fileid=fileid,
190 changectx=self, filelog=filelog)
190 changectx=self, filelog=filelog)
191
191
192 def ancestor(self, c2):
192 def ancestor(self, c2):
193 """
193 """
194 return the ancestor context of self and c2
194 return the ancestor context of self and c2
195 """
195 """
196 # deal with workingctxs
196 # deal with workingctxs
197 n2 = c2._node
197 n2 = c2._node
198 if n2 is None:
198 if n2 is None:
199 n2 = c2._parents[0]._node
199 n2 = c2._parents[0]._node
200 n = self._repo.changelog.ancestor(self._node, n2)
200 n = self._repo.changelog.ancestor(self._node, n2)
201 return changectx(self._repo, n)
201 return changectx(self._repo, n)
202
202
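A small usage sketch for ancestor(); the repository handle and revision names are assumptions for illustration:

    # Sketch only: 'repo' is assumed to come from hg.repository(ui, path).
    c1, c2 = repo['default'], repo['stable']   # hypothetical revision names
    base = c1.ancestor(c2)                     # common ancestor as a changectx
    print base.rev(), base.hex()[:12]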
203 def walk(self, match):
203 def walk(self, match):
204 fset = set(match.files())
204 fset = set(match.files())
205 # for dirstate.walk, files=['.'] means "walk the whole tree".
205 # for dirstate.walk, files=['.'] means "walk the whole tree".
206 # follow that here, too
206 # follow that here, too
207 fset.discard('.')
207 fset.discard('.')
208 for fn in self:
208 for fn in self:
209 for ffn in fset:
209 for ffn in fset:
210 # match if the file is the exact name or a directory
210 # match if the file is the exact name or a directory
211 if ffn == fn or fn.startswith("%s/" % ffn):
211 if ffn == fn or fn.startswith("%s/" % ffn):
212 fset.remove(ffn)
212 fset.remove(ffn)
213 break
213 break
214 if match(fn):
214 if match(fn):
215 yield fn
215 yield fn
216 for fn in sorted(fset):
216 for fn in sorted(fset):
217 if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
217 if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
218 yield fn
218 yield fn
219
219
220 def sub(self, path):
220 def sub(self, path):
221 return subrepo.subrepo(self, path)
221 return subrepo.subrepo(self, path)
222
222
223 def match(self, pats=[], include=None, exclude=None, default='glob'):
223 def match(self, pats=[], include=None, exclude=None, default='glob'):
224 r = self._repo
224 r = self._repo
225 return matchmod.match(r.root, r.getcwd(), pats,
225 return matchmod.match(r.root, r.getcwd(), pats,
226 include, exclude, default,
226 include, exclude, default,
227 auditor=r.auditor, ctx=self)
227 auditor=r.auditor, ctx=self)
228
228
229 def diff(self, ctx2=None, match=None, **opts):
229 def diff(self, ctx2=None, match=None, **opts):
230 """Returns a diff generator for the given contexts and matcher"""
230 """Returns a diff generator for the given contexts and matcher"""
231 if ctx2 is None:
231 if ctx2 is None:
232 ctx2 = self.p1()
232 ctx2 = self.p1()
233 if ctx2 is not None and not isinstance(ctx2, changectx):
233 if ctx2 is not None and not isinstance(ctx2, changectx):
234 ctx2 = self._repo[ctx2]
234 ctx2 = self._repo[ctx2]
235 diffopts = patch.diffopts(self._repo.ui, opts)
235 diffopts = patch.diffopts(self._repo.ui, opts)
236 return patch.diff(self._repo, ctx2.node(), self.node(),
236 return patch.diff(self._repo, ctx2.node(), self.node(),
237 match=match, opts=diffopts)
237 match=match, opts=diffopts)
238
238
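For orientation, a short sketch of how these accessors are typically combined; 'repo' and the pattern are assumptions for illustration:

    import sys

    ctx = repo['tip']
    for fn in ctx.walk(ctx.match(['glob:**.py'])):   # files matching a pattern
        print fn, ctx[fn].size()
    for chunk in ctx.diff():                         # changes vs. first parent
        sys.stdout.write(chunk)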
239 class filectx(object):
239 class filectx(object):
240 """A filecontext object makes access to data related to a particular
240 """A filecontext object makes access to data related to a particular
241 filerevision convenient."""
241 filerevision convenient."""
242 def __init__(self, repo, path, changeid=None, fileid=None,
242 def __init__(self, repo, path, changeid=None, fileid=None,
243 filelog=None, changectx=None):
243 filelog=None, changectx=None):
244 """changeid can be a changeset revision, node, or tag.
244 """changeid can be a changeset revision, node, or tag.
245 fileid can be a file revision or node."""
245 fileid can be a file revision or node."""
246 self._repo = repo
246 self._repo = repo
247 self._path = path
247 self._path = path
248
248
249 assert (changeid is not None
249 assert (changeid is not None
250 or fileid is not None
250 or fileid is not None
251 or changectx is not None), \
251 or changectx is not None), \
252 ("bad args: changeid=%r, fileid=%r, changectx=%r"
252 ("bad args: changeid=%r, fileid=%r, changectx=%r"
253 % (changeid, fileid, changectx))
253 % (changeid, fileid, changectx))
254
254
255 if filelog:
255 if filelog:
256 self._filelog = filelog
256 self._filelog = filelog
257
257
258 if changeid is not None:
258 if changeid is not None:
259 self._changeid = changeid
259 self._changeid = changeid
260 if changectx is not None:
260 if changectx is not None:
261 self._changectx = changectx
261 self._changectx = changectx
262 if fileid is not None:
262 if fileid is not None:
263 self._fileid = fileid
263 self._fileid = fileid
264
264
265 @propertycache
265 @propertycache
266 def _changectx(self):
266 def _changectx(self):
267 return changectx(self._repo, self._changeid)
267 return changectx(self._repo, self._changeid)
268
268
269 @propertycache
269 @propertycache
270 def _filelog(self):
270 def _filelog(self):
271 return self._repo.file(self._path)
271 return self._repo.file(self._path)
272
272
273 @propertycache
273 @propertycache
274 def _changeid(self):
274 def _changeid(self):
275 if '_changectx' in self.__dict__:
275 if '_changectx' in self.__dict__:
276 return self._changectx.rev()
276 return self._changectx.rev()
277 else:
277 else:
278 return self._filelog.linkrev(self._filerev)
278 return self._filelog.linkrev(self._filerev)
279
279
280 @propertycache
280 @propertycache
281 def _filenode(self):
281 def _filenode(self):
282 if '_fileid' in self.__dict__:
282 if '_fileid' in self.__dict__:
283 return self._filelog.lookup(self._fileid)
283 return self._filelog.lookup(self._fileid)
284 else:
284 else:
285 return self._changectx.filenode(self._path)
285 return self._changectx.filenode(self._path)
286
286
287 @propertycache
287 @propertycache
288 def _filerev(self):
288 def _filerev(self):
289 return self._filelog.rev(self._filenode)
289 return self._filelog.rev(self._filenode)
290
290
291 @propertycache
291 @propertycache
292 def _repopath(self):
292 def _repopath(self):
293 return self._path
293 return self._path
294
294
295 def __nonzero__(self):
295 def __nonzero__(self):
296 try:
296 try:
297 self._filenode
297 self._filenode
298 return True
298 return True
299 except error.LookupError:
299 except error.LookupError:
300 # file is missing
300 # file is missing
301 return False
301 return False
302
302
303 def __str__(self):
303 def __str__(self):
304 return "%s@%s" % (self.path(), short(self.node()))
304 return "%s@%s" % (self.path(), short(self.node()))
305
305
306 def __repr__(self):
306 def __repr__(self):
307 return "<filectx %s>" % str(self)
307 return "<filectx %s>" % str(self)
308
308
309 def __hash__(self):
309 def __hash__(self):
310 try:
310 try:
311 return hash((self._path, self._filenode))
311 return hash((self._path, self._filenode))
312 except AttributeError:
312 except AttributeError:
313 return id(self)
313 return id(self)
314
314
315 def __eq__(self, other):
315 def __eq__(self, other):
316 try:
316 try:
317 return (self._path == other._path
317 return (self._path == other._path
318 and self._filenode == other._filenode)
318 and self._filenode == other._filenode)
319 except AttributeError:
319 except AttributeError:
320 return False
320 return False
321
321
322 def __ne__(self, other):
322 def __ne__(self, other):
323 return not (self == other)
323 return not (self == other)
324
324
325 def filectx(self, fileid):
325 def filectx(self, fileid):
326 '''opens an arbitrary revision of the file without
326 '''opens an arbitrary revision of the file without
327 opening a new filelog'''
327 opening a new filelog'''
328 return filectx(self._repo, self._path, fileid=fileid,
328 return filectx(self._repo, self._path, fileid=fileid,
329 filelog=self._filelog)
329 filelog=self._filelog)
330
330
331 def filerev(self):
331 def filerev(self):
332 return self._filerev
332 return self._filerev
333 def filenode(self):
333 def filenode(self):
334 return self._filenode
334 return self._filenode
335 def flags(self):
335 def flags(self):
336 return self._changectx.flags(self._path)
336 return self._changectx.flags(self._path)
337 def filelog(self):
337 def filelog(self):
338 return self._filelog
338 return self._filelog
339
339
340 def rev(self):
340 def rev(self):
341 if '_changectx' in self.__dict__:
341 if '_changectx' in self.__dict__:
342 return self._changectx.rev()
342 return self._changectx.rev()
343 if '_changeid' in self.__dict__:
343 if '_changeid' in self.__dict__:
344 return self._changectx.rev()
344 return self._changectx.rev()
345 return self._filelog.linkrev(self._filerev)
345 return self._filelog.linkrev(self._filerev)
346
346
347 def linkrev(self):
347 def linkrev(self):
348 return self._filelog.linkrev(self._filerev)
348 return self._filelog.linkrev(self._filerev)
349 def node(self):
349 def node(self):
350 return self._changectx.node()
350 return self._changectx.node()
351 def hex(self):
351 def hex(self):
352 return hex(self.node())
352 return hex(self.node())
353 def user(self):
353 def user(self):
354 return self._changectx.user()
354 return self._changectx.user()
355 def date(self):
355 def date(self):
356 return self._changectx.date()
356 return self._changectx.date()
357 def files(self):
357 def files(self):
358 return self._changectx.files()
358 return self._changectx.files()
359 def description(self):
359 def description(self):
360 return self._changectx.description()
360 return self._changectx.description()
361 def branch(self):
361 def branch(self):
362 return self._changectx.branch()
362 return self._changectx.branch()
363 def extra(self):
363 def extra(self):
364 return self._changectx.extra()
364 return self._changectx.extra()
365 def manifest(self):
365 def manifest(self):
366 return self._changectx.manifest()
366 return self._changectx.manifest()
367 def changectx(self):
367 def changectx(self):
368 return self._changectx
368 return self._changectx
369
369
370 def data(self):
370 def data(self):
371 return self._filelog.read(self._filenode)
371 return self._filelog.read(self._filenode)
372 def path(self):
372 def path(self):
373 return self._path
373 return self._path
374 def size(self):
374 def size(self):
375 return self._filelog.size(self._filerev)
375 return self._filelog.size(self._filerev)
376
376
377 def isbinary(self):
377 def isbinary(self):
378 try:
378 try:
379 return util.binary(self.data())
379 return util.binary(self.data())
380 except IOError:
380 except IOError:
381 return False
381 return False
382
382
383 def cmp(self, fctx):
383 def cmp(self, fctx):
384 """compare with other file context
384 """compare with other file context
385
385
386 returns True if different than fctx.
386 returns True if different than fctx.
387 """
387 """
388 if (fctx._filerev is None
388 if (fctx._filerev is None
389 and (self._repo._encodefilterpats
389 and (self._repo._encodefilterpats
390 # if file data starts with '\1\n', an empty metadata block is
390 # if file data starts with '\1\n', an empty metadata block is
391 # prepended, which adds 4 bytes to filelog.size().
391 # prepended, which adds 4 bytes to filelog.size().
392 or self.size() - 4 == fctx.size())
392 or self.size() - 4 == fctx.size())
393 or self.size() == fctx.size()):
393 or self.size() == fctx.size()):
394 return self._filelog.cmp(self._filenode, fctx.data())
394 return self._filelog.cmp(self._filenode, fctx.data())
395
395
396 return True
396 return True
397
397
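Note that cmp() is a difference test, not an ordering; a minimal sketch with an assumed file name:

    # True means 'contents differ'; the size checks above only decide whether
    # the full filelog comparison can be skipped.
    if repo['.']['setup.py'].cmp(repo['tip']['setup.py']):
        print 'setup.py differs between . and tip'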
398 def renamed(self):
398 def renamed(self):
399 """check if file was actually renamed in this changeset revision
399 """check if file was actually renamed in this changeset revision
400
400
401 If rename logged in file revision, we report copy for changeset only
401 If rename logged in file revision, we report copy for changeset only
402 if file revisions linkrev points back to the changeset in question
402 if file revisions linkrev points back to the changeset in question
403 or both changeset parents contain different file revisions.
403 or both changeset parents contain different file revisions.
404 """
404 """
405
405
406 renamed = self._filelog.renamed(self._filenode)
406 renamed = self._filelog.renamed(self._filenode)
407 if not renamed:
407 if not renamed:
408 return renamed
408 return renamed
409
409
410 if self.rev() == self.linkrev():
410 if self.rev() == self.linkrev():
411 return renamed
411 return renamed
412
412
413 name = self.path()
413 name = self.path()
414 fnode = self._filenode
414 fnode = self._filenode
415 for p in self._changectx.parents():
415 for p in self._changectx.parents():
416 try:
416 try:
417 if fnode == p.filenode(name):
417 if fnode == p.filenode(name):
418 return None
418 return None
419 except error.LookupError:
419 except error.LookupError:
420 pass
420 pass
421 return renamed
421 return renamed
422
422
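Callers treat the return value as either a (source path, source filenode) pair or a false value; a sketch with an assumed filectx 'fctx':

    r = fctx.renamed()
    if r:
        oldpath, oldnode = r
        print '%s was copied from %s' % (fctx.path(), oldpath)
    else:
        print '%s: no rename to report' % fctx.path()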
423 def parents(self):
423 def parents(self):
424 p = self._path
424 p = self._path
425 fl = self._filelog
425 fl = self._filelog
426 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
426 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
427
427
428 r = self._filelog.renamed(self._filenode)
428 r = self._filelog.renamed(self._filenode)
429 if r:
429 if r:
430 pl[0] = (r[0], r[1], None)
430 pl[0] = (r[0], r[1], None)
431
431
432 return [filectx(self._repo, p, fileid=n, filelog=l)
432 return [filectx(self._repo, p, fileid=n, filelog=l)
433 for p, n, l in pl if n != nullid]
433 for p, n, l in pl if n != nullid]
434
434
435 def p1(self):
435 def p1(self):
436 return self.parents()[0]
436 return self.parents()[0]
437
437
438 def p2(self):
438 def p2(self):
439 p = self.parents()
439 p = self.parents()
440 if len(p) == 2:
440 if len(p) == 2:
441 return p[1]
441 return p[1]
442 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
442 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
443
443
444 def children(self):
444 def children(self):
445 # hard for renames
445 # hard for renames
446 c = self._filelog.children(self._filenode)
446 c = self._filelog.children(self._filenode)
447 return [filectx(self._repo, self._path, fileid=x,
447 return [filectx(self._repo, self._path, fileid=x,
448 filelog=self._filelog) for x in c]
448 filelog=self._filelog) for x in c]
449
449
450 def annotate(self, follow=False, linenumber=None, diffopts=None):
450 def annotate(self, follow=False, linenumber=None, diffopts=None):
451 '''returns a list of tuples of (ctx, line) for each line
451 '''returns a list of tuples of (ctx, line) for each line
452 in the file, where ctx is the filectx of the node where
452 in the file, where ctx is the filectx of the node where
453 that line was last changed.
453 that line was last changed.
454 This returns tuples of ((ctx, linenumber), line) for each line
454 This returns tuples of ((ctx, linenumber), line) for each line
455 if the "linenumber" parameter is not None.
455 if the "linenumber" parameter is not None.
456 In such tuples, linenumber is the line's number at its first
456 In such tuples, linenumber is the line's number at its first
457 appearance in the managed file.
457 appearance in the managed file.
458 To reduce annotation cost,
458 To reduce annotation cost,
459 a fixed value (False) is used as the linenumber
459 a fixed value (False) is used as the linenumber
460 if the "linenumber" parameter is False.'''
460 if the "linenumber" parameter is False.'''
461
461
462 def decorate_compat(text, rev):
462 def decorate_compat(text, rev):
463 return ([rev] * len(text.splitlines()), text)
463 return ([rev] * len(text.splitlines()), text)
464
464
465 def without_linenumber(text, rev):
465 def without_linenumber(text, rev):
466 return ([(rev, False)] * len(text.splitlines()), text)
466 return ([(rev, False)] * len(text.splitlines()), text)
467
467
468 def with_linenumber(text, rev):
468 def with_linenumber(text, rev):
469 size = len(text.splitlines())
469 size = len(text.splitlines())
470 return ([(rev, i) for i in xrange(1, size + 1)], text)
470 return ([(rev, i) for i in xrange(1, size + 1)], text)
471
471
472 decorate = (((linenumber is None) and decorate_compat) or
472 decorate = (((linenumber is None) and decorate_compat) or
473 (linenumber and with_linenumber) or
473 (linenumber and with_linenumber) or
474 without_linenumber)
474 without_linenumber)
475
475
476 def pair(parent, child):
476 def pair(parent, child):
477 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
477 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
478 refine=True)
478 refine=True)
479 for (a1, a2, b1, b2), t in blocks:
479 for (a1, a2, b1, b2), t in blocks:
480 # Changed blocks ('!') or blocks made only of blank lines ('~')
480 # Changed blocks ('!') or blocks made only of blank lines ('~')
481 # belong to the child.
481 # belong to the child.
482 if t == '=':
482 if t == '=':
483 child[0][b1:b2] = parent[0][a1:a2]
483 child[0][b1:b2] = parent[0][a1:a2]
484 return child
484 return child
485
485
486 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
486 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
487 def getctx(path, fileid):
487 def getctx(path, fileid):
488 log = path == self._path and self._filelog or getlog(path)
488 log = path == self._path and self._filelog or getlog(path)
489 return filectx(self._repo, path, fileid=fileid, filelog=log)
489 return filectx(self._repo, path, fileid=fileid, filelog=log)
490 getctx = util.lrucachefunc(getctx)
490 getctx = util.lrucachefunc(getctx)
491
491
492 def parents(f):
492 def parents(f):
493 # we want to reuse filectx objects as much as possible
493 # we want to reuse filectx objects as much as possible
494 p = f._path
494 p = f._path
495 if f._filerev is None: # working dir
495 if f._filerev is None: # working dir
496 pl = [(n.path(), n.filerev()) for n in f.parents()]
496 pl = [(n.path(), n.filerev()) for n in f.parents()]
497 else:
497 else:
498 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
498 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
499
499
500 if follow:
500 if follow:
501 r = f.renamed()
501 r = f.renamed()
502 if r:
502 if r:
503 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
503 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
504
504
505 return [getctx(p, n) for p, n in pl if n != nullrev]
505 return [getctx(p, n) for p, n in pl if n != nullrev]
506
506
507 # use linkrev to find the first changeset where self appeared
507 # use linkrev to find the first changeset where self appeared
508 if self.rev() != self.linkrev():
508 if self.rev() != self.linkrev():
509 base = self.filectx(self.filerev())
509 base = self.filectx(self.filerev())
510 else:
510 else:
511 base = self
511 base = self
512
512
513 # This algorithm would prefer to be recursive, but Python is a
513 # This algorithm would prefer to be recursive, but Python is a
514 # bit recursion-hostile. Instead we do an iterative
514 # bit recursion-hostile. Instead we do an iterative
515 # depth-first search.
515 # depth-first search.
516
516
517 visit = [base]
517 visit = [base]
518 hist = {}
518 hist = {}
519 pcache = {}
519 pcache = {}
520 needed = {base: 1}
520 needed = {base: 1}
521 while visit:
521 while visit:
522 f = visit[-1]
522 f = visit[-1]
523 if f not in pcache:
523 if f not in pcache:
524 pcache[f] = parents(f)
524 pcache[f] = parents(f)
525
525
526 ready = True
526 ready = True
527 pl = pcache[f]
527 pl = pcache[f]
528 for p in pl:
528 for p in pl:
529 if p not in hist:
529 if p not in hist:
530 ready = False
530 ready = False
531 visit.append(p)
531 visit.append(p)
532 needed[p] = needed.get(p, 0) + 1
532 needed[p] = needed.get(p, 0) + 1
533 if ready:
533 if ready:
534 visit.pop()
534 visit.pop()
535 curr = decorate(f.data(), f)
535 curr = decorate(f.data(), f)
536 for p in pl:
536 for p in pl:
537 curr = pair(hist[p], curr)
537 curr = pair(hist[p], curr)
538 if needed[p] == 1:
538 if needed[p] == 1:
539 del hist[p]
539 del hist[p]
540 else:
540 else:
541 needed[p] -= 1
541 needed[p] -= 1
542
542
543 hist[f] = curr
543 hist[f] = curr
544 pcache[f] = []
544 pcache[f] = []
545
545
546 return zip(hist[base][0], hist[base][1].splitlines(True))
546 return zip(hist[base][0], hist[base][1].splitlines(True))
547
547
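A usage sketch for annotate(); the repository handle and file name are assumptions:

    # Each entry pairs a line with the filectx that last changed it; the line
    # text keeps its trailing newline (splitlines(True) above).
    for fc, line in repo['tip']['README'].annotate():
        print '%5d: %s' % (fc.rev(), line),
    # With linenumber=True the first element becomes (filectx, line number at
    # the line's first appearance).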
548 def ancestor(self, fc2, actx=None):
548 def ancestor(self, fc2, actx=None):
549 """
549 """
550 find the common ancestor file context, if any, of self, and fc2
550 find the common ancestor file context, if any, of self, and fc2
551
551
552 If actx is given, it must be the changectx of the common ancestor
552 If actx is given, it must be the changectx of the common ancestor
553 of self's and fc2's respective changesets.
553 of self's and fc2's respective changesets.
554 """
554 """
555
555
556 if actx is None:
556 if actx is None:
557 actx = self.changectx().ancestor(fc2.changectx())
557 actx = self.changectx().ancestor(fc2.changectx())
558
558
559 # the trivial case: changesets are unrelated, files must be too
559 # the trivial case: changesets are unrelated, files must be too
560 if not actx:
560 if not actx:
561 return None
561 return None
562
562
563 # the easy case: no (relevant) renames
563 # the easy case: no (relevant) renames
564 if fc2.path() == self.path() and self.path() in actx:
564 if fc2.path() == self.path() and self.path() in actx:
565 return actx[self.path()]
565 return actx[self.path()]
566 acache = {}
566 acache = {}
567
567
568 # prime the ancestor cache for the working directory
568 # prime the ancestor cache for the working directory
569 for c in (self, fc2):
569 for c in (self, fc2):
570 if c._filerev is None:
570 if c._filerev is None:
571 pl = [(n.path(), n.filenode()) for n in c.parents()]
571 pl = [(n.path(), n.filenode()) for n in c.parents()]
572 acache[(c._path, None)] = pl
572 acache[(c._path, None)] = pl
573
573
574 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
574 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
575 def parents(vertex):
575 def parents(vertex):
576 if vertex in acache:
576 if vertex in acache:
577 return acache[vertex]
577 return acache[vertex]
578 f, n = vertex
578 f, n = vertex
579 if f not in flcache:
579 if f not in flcache:
580 flcache[f] = self._repo.file(f)
580 flcache[f] = self._repo.file(f)
581 fl = flcache[f]
581 fl = flcache[f]
582 pl = [(f, p) for p in fl.parents(n) if p != nullid]
582 pl = [(f, p) for p in fl.parents(n) if p != nullid]
583 re = fl.renamed(n)
583 re = fl.renamed(n)
584 if re:
584 if re:
585 pl.append(re)
585 pl.append(re)
586 acache[vertex] = pl
586 acache[vertex] = pl
587 return pl
587 return pl
588
588
589 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
589 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
590 v = ancestor.ancestor(a, b, parents)
590 v = ancestor.ancestor(a, b, parents)
591 if v:
591 if v:
592 f, n = v
592 f, n = v
593 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
593 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
594
594
595 return None
595 return None
596
596
597 def ancestors(self):
597 def ancestors(self):
598 visit = {}
598 visit = {}
599 c = self
599 c = self
600 while True:
600 while True:
601 for parent in c.parents():
601 for parent in c.parents():
602 visit[(parent.rev(), parent.node())] = parent
602 visit[(parent.rev(), parent.node())] = parent
603 if not visit:
603 if not visit:
604 break
604 break
605 c = visit.pop(max(visit))
605 c = visit.pop(max(visit))
606 yield c
606 yield c
607
607
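And a sketch of walking a file's ancestry with ancestors(); the file name is an assumption:

    for a in repo['tip']['README'].ancestors():
        print a.rev(), a.path(), a.hex()[:12]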
608 class workingctx(changectx):
608 class workingctx(changectx):
609 """A workingctx object makes access to data related to
609 """A workingctx object makes access to data related to
610 the current working directory convenient.
610 the current working directory convenient.
611 date - any valid date string or (unixtime, offset), or None.
611 date - any valid date string or (unixtime, offset), or None.
612 user - username string, or None.
612 user - username string, or None.
613 extra - a dictionary of extra values, or None.
613 extra - a dictionary of extra values, or None.
614 changes - a list of file lists as returned by localrepo.status()
614 changes - a list of file lists as returned by localrepo.status()
615 or None to use the repository status.
615 or None to use the repository status.
616 """
616 """
617 def __init__(self, repo, text="", user=None, date=None, extra=None,
617 def __init__(self, repo, text="", user=None, date=None, extra=None,
618 changes=None):
618 changes=None):
619 self._repo = repo
619 self._repo = repo
620 self._rev = None
620 self._rev = None
621 self._node = None
621 self._node = None
622 self._text = text
622 self._text = text
623 if date:
623 if date:
624 self._date = util.parsedate(date)
624 self._date = util.parsedate(date)
625 if user:
625 if user:
626 self._user = user
626 self._user = user
627 if changes:
627 if changes:
628 self._status = list(changes[:4])
628 self._status = list(changes[:4])
629 self._unknown = changes[4]
629 self._unknown = changes[4]
630 self._ignored = changes[5]
630 self._ignored = changes[5]
631 self._clean = changes[6]
631 self._clean = changes[6]
632 else:
632 else:
633 self._unknown = None
633 self._unknown = None
634 self._ignored = None
634 self._ignored = None
635 self._clean = None
635 self._clean = None
636
636
637 self._extra = {}
637 self._extra = {}
638 if extra:
638 if extra:
639 self._extra = extra.copy()
639 self._extra = extra.copy()
640 if 'branch' not in self._extra:
640 if 'branch' not in self._extra:
641 try:
641 try:
642 branch = encoding.fromlocal(self._repo.dirstate.branch())
642 branch = encoding.fromlocal(self._repo.dirstate.branch())
643 except UnicodeDecodeError:
643 except UnicodeDecodeError:
644 raise util.Abort(_('branch name not in UTF-8!'))
644 raise util.Abort(_('branch name not in UTF-8!'))
645 self._extra['branch'] = branch
645 self._extra['branch'] = branch
646 if self._extra['branch'] == '':
646 if self._extra['branch'] == '':
647 self._extra['branch'] = 'default'
647 self._extra['branch'] = 'default'
648
648
649 def __str__(self):
649 def __str__(self):
650 return str(self._parents[0]) + "+"
650 return str(self._parents[0]) + "+"
651
651
652 def __repr__(self):
652 def __repr__(self):
653 return "<workingctx %s>" % str(self)
653 return "<workingctx %s>" % str(self)
654
654
655 def __nonzero__(self):
655 def __nonzero__(self):
656 return True
656 return True
657
657
658 def __contains__(self, key):
658 def __contains__(self, key):
659 return self._repo.dirstate[key] not in "?r"
659 return self._repo.dirstate[key] not in "?r"
660
660
661 def _buildflagfunc(self):
661 def _buildflagfunc(self):
662 # Create a fallback function for getting file flags when the
662 # Create a fallback function for getting file flags when the
663 # filesystem doesn't support them
663 # filesystem doesn't support them
664
664
665 copiesget = self._repo.dirstate.copies().get
665 copiesget = self._repo.dirstate.copies().get
666
666
667 if len(self._parents) < 2:
667 if len(self._parents) < 2:
668 # when we have one parent, it's easy: copy from parent
668 # when we have one parent, it's easy: copy from parent
669 man = self._parents[0].manifest()
669 man = self._parents[0].manifest()
670 def func(f):
670 def func(f):
671 f = copiesget(f, f)
671 f = copiesget(f, f)
672 return man.flags(f)
672 return man.flags(f)
673 else:
673 else:
674 # merges are tricky: we try to reconstruct the unstored
674 # merges are tricky: we try to reconstruct the unstored
675 # result from the merge (issue1802)
675 # result from the merge (issue1802)
676 p1, p2 = self._parents
676 p1, p2 = self._parents
677 pa = p1.ancestor(p2)
677 pa = p1.ancestor(p2)
678 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
678 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
679
679
680 def func(f):
680 def func(f):
681 f = copiesget(f, f) # may be wrong for merges with copies
681 f = copiesget(f, f) # may be wrong for merges with copies
682 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
682 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
683 if fl1 == fl2:
683 if fl1 == fl2:
684 return fl1
684 return fl1
685 if fl1 == fla:
685 if fl1 == fla:
686 return fl2
686 return fl2
687 if fl2 == fla:
687 if fl2 == fla:
688 return fl1
688 return fl1
689 return '' # punt for conflicts
689 return '' # punt for conflicts
690
690
691 return func
691 return func
692
692
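A worked example of the flag-reconstruction rule above:

    # Illustrative case: ancestor flag fla = '' (plain file), p1 flag fl1 = 'x'
    # (exec bit set on one side), p2 flag fl2 = '' (untouched on the other).
    # fl1 != fl2 and fl1 != fla, but fl2 == fla, so the changed side wins and
    # 'x' is returned.  If both parents changed the flag in different ways,
    # '' is returned (the punt case).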
693 @propertycache
693 @propertycache
694 def _flagfunc(self):
694 def _flagfunc(self):
695 return self._repo.dirstate.flagfunc(self._buildflagfunc)
695 return self._repo.dirstate.flagfunc(self._buildflagfunc)
696
696
697 @propertycache
697 @propertycache
698 def _manifest(self):
698 def _manifest(self):
699 """generate a manifest corresponding to the working directory"""
699 """generate a manifest corresponding to the working directory"""
700
700
701 if self._unknown is None:
702 self.status(unknown=True)
703
704 man = self._parents[0].manifest().copy()
701 man = self._parents[0].manifest().copy()
705 if len(self._parents) > 1:
702 if len(self._parents) > 1:
706 man2 = self.p2().manifest()
703 man2 = self.p2().manifest()
707 def getman(f):
704 def getman(f):
708 if f in man:
705 if f in man:
709 return man
706 return man
710 return man2
707 return man2
711 else:
708 else:
712 getman = lambda f: man
709 getman = lambda f: man
713
710
714 copied = self._repo.dirstate.copies()
711 copied = self._repo.dirstate.copies()
715 ff = self._flagfunc
712 ff = self._flagfunc
716 modified, added, removed, deleted = self._status
713 modified, added, removed, deleted = self._status
717 unknown = self._unknown
714 for i, l in (("a", added), ("m", modified)):
718 for i, l in (("a", added), ("m", modified), ("u", unknown)):
719 for f in l:
715 for f in l:
720 orig = copied.get(f, f)
716 orig = copied.get(f, f)
721 man[f] = getman(orig).get(orig, nullid) + i
717 man[f] = getman(orig).get(orig, nullid) + i
722 try:
718 try:
723 man.set(f, ff(f))
719 man.set(f, ff(f))
724 except OSError:
720 except OSError:
725 pass
721 pass
726
722
727 for f in deleted + removed:
723 for f in deleted + removed:
728 if f in man:
724 if f in man:
729 del man[f]
725 del man[f]
730
726
731 return man
727 return man
732
728
733 def __iter__(self):
729 def __iter__(self):
734 d = self._repo.dirstate
730 d = self._repo.dirstate
735 for f in d:
731 for f in d:
736 if d[f] != 'r':
732 if d[f] != 'r':
737 yield f
733 yield f
738
734
739 @propertycache
735 @propertycache
740 def _status(self):
736 def _status(self):
741 return self._repo.status()[:4]
737 return self._repo.status()[:4]
742
738
743 @propertycache
739 @propertycache
744 def _user(self):
740 def _user(self):
745 return self._repo.ui.username()
741 return self._repo.ui.username()
746
742
747 @propertycache
743 @propertycache
748 def _date(self):
744 def _date(self):
749 return util.makedate()
745 return util.makedate()
750
746
751 @propertycache
747 @propertycache
752 def _parents(self):
748 def _parents(self):
753 p = self._repo.dirstate.parents()
749 p = self._repo.dirstate.parents()
754 if p[1] == nullid:
750 if p[1] == nullid:
755 p = p[:-1]
751 p = p[:-1]
756 self._parents = [changectx(self._repo, x) for x in p]
752 self._parents = [changectx(self._repo, x) for x in p]
757 return self._parents
753 return self._parents
758
754
759 def status(self, ignored=False, clean=False, unknown=False):
755 def status(self, ignored=False, clean=False, unknown=False):
760 """Explicit status query
756 """Explicit status query
761 Unless this method is used to query the working copy status, the
757 Unless this method is used to query the working copy status, the
762 _status property will implicitly read the status using its default
758 _status property will implicitly read the status using its default
763 arguments."""
759 arguments."""
764 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
760 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
765 self._unknown = self._ignored = self._clean = None
761 self._unknown = self._ignored = self._clean = None
766 if unknown:
762 if unknown:
767 self._unknown = stat[4]
763 self._unknown = stat[4]
768 if ignored:
764 if ignored:
769 self._ignored = stat[5]
765 self._ignored = stat[5]
770 if clean:
766 if clean:
771 self._clean = stat[6]
767 self._clean = stat[6]
772 self._status = stat[:4]
768 self._status = stat[:4]
773 return stat
769 return stat
774
770
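A sketch of the intended call pattern; repo[None] yielding the working context is standard localrepo behaviour, the rest is illustrative:

    wctx = repo[None]                      # working directory context
    wctx.status(unknown=True, ignored=True, clean=True)
    # Only after this explicit call are the cached lists below populated.
    print len(wctx.unknown()), 'unknown,', len(wctx.clean()), 'clean'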
775 def manifest(self):
771 def manifest(self):
776 return self._manifest
772 return self._manifest
777 def user(self):
773 def user(self):
778 return self._user or self._repo.ui.username()
774 return self._user or self._repo.ui.username()
779 def date(self):
775 def date(self):
780 return self._date
776 return self._date
781 def description(self):
777 def description(self):
782 return self._text
778 return self._text
783 def files(self):
779 def files(self):
784 return sorted(self._status[0] + self._status[1] + self._status[2])
780 return sorted(self._status[0] + self._status[1] + self._status[2])
785
781
786 def modified(self):
782 def modified(self):
787 return self._status[0]
783 return self._status[0]
788 def added(self):
784 def added(self):
789 return self._status[1]
785 return self._status[1]
790 def removed(self):
786 def removed(self):
791 return self._status[2]
787 return self._status[2]
792 def deleted(self):
788 def deleted(self):
793 return self._status[3]
789 return self._status[3]
794 def unknown(self):
790 def unknown(self):
795 assert self._unknown is not None # must call status first
791 assert self._unknown is not None # must call status first
796 return self._unknown
792 return self._unknown
797 def ignored(self):
793 def ignored(self):
798 assert self._ignored is not None # must call status first
794 assert self._ignored is not None # must call status first
799 return self._ignored
795 return self._ignored
800 def clean(self):
796 def clean(self):
801 assert self._clean is not None # must call status first
797 assert self._clean is not None # must call status first
802 return self._clean
798 return self._clean
803 def branch(self):
799 def branch(self):
804 return encoding.tolocal(self._extra['branch'])
800 return encoding.tolocal(self._extra['branch'])
805 def extra(self):
801 def extra(self):
806 return self._extra
802 return self._extra
807
803
808 def tags(self):
804 def tags(self):
809 t = []
805 t = []
810 for p in self.parents():
806 for p in self.parents():
811 t.extend(p.tags())
807 t.extend(p.tags())
812 return t
808 return t
813
809
814 def bookmarks(self):
810 def bookmarks(self):
815 b = []
811 b = []
816 for p in self.parents():
812 for p in self.parents():
817 b.extend(p.bookmarks())
813 b.extend(p.bookmarks())
818 return b
814 return b
819
815
820 def phase(self):
816 def phase(self):
821 phase = phases.draft # default phase to draft
817 phase = phases.draft # default phase to draft
822 for p in self.parents():
818 for p in self.parents():
823 phase = max(phase, p.phase())
819 phase = max(phase, p.phase())
824 return phase
820 return phase
825
821
826 def hidden(self):
822 def hidden(self):
827 return False
823 return False
828
824
829 def children(self):
825 def children(self):
830 return []
826 return []
831
827
832 def flags(self, path):
828 def flags(self, path):
833 if '_manifest' in self.__dict__:
829 if '_manifest' in self.__dict__:
834 try:
830 try:
835 return self._manifest.flags(path)
831 return self._manifest.flags(path)
836 except KeyError:
832 except KeyError:
837 return ''
833 return ''
838
834
839 try:
835 try:
840 return self._flagfunc(path)
836 return self._flagfunc(path)
841 except OSError:
837 except OSError:
842 return ''
838 return ''
843
839
844 def filectx(self, path, filelog=None):
840 def filectx(self, path, filelog=None):
845 """get a file context from the working directory"""
841 """get a file context from the working directory"""
846 return workingfilectx(self._repo, path, workingctx=self,
842 return workingfilectx(self._repo, path, workingctx=self,
847 filelog=filelog)
843 filelog=filelog)
848
844
849 def ancestor(self, c2):
845 def ancestor(self, c2):
850 """return the ancestor context of self and c2"""
846 """return the ancestor context of self and c2"""
851 return self._parents[0].ancestor(c2) # punt on two parents for now
847 return self._parents[0].ancestor(c2) # punt on two parents for now
852
848
853 def walk(self, match):
849 def walk(self, match):
854 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
850 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
855 True, False))
851 True, False))
856
852
857 def dirty(self, missing=False):
853 def dirty(self, missing=False):
858 "check whether a working directory is modified"
854 "check whether a working directory is modified"
859 # check subrepos first
855 # check subrepos first
860 for s in self.substate:
856 for s in self.substate:
861 if self.sub(s).dirty():
857 if self.sub(s).dirty():
862 return True
858 return True
863 # check current working dir
859 # check current working dir
864 return (self.p2() or self.branch() != self.p1().branch() or
860 return (self.p2() or self.branch() != self.p1().branch() or
865 self.modified() or self.added() or self.removed() or
861 self.modified() or self.added() or self.removed() or
866 (missing and self.deleted()))
862 (missing and self.deleted()))
867
863
868 def add(self, list, prefix=""):
864 def add(self, list, prefix=""):
869 join = lambda f: os.path.join(prefix, f)
865 join = lambda f: os.path.join(prefix, f)
870 wlock = self._repo.wlock()
866 wlock = self._repo.wlock()
871 ui, ds = self._repo.ui, self._repo.dirstate
867 ui, ds = self._repo.ui, self._repo.dirstate
872 try:
868 try:
873 rejected = []
869 rejected = []
874 for f in list:
870 for f in list:
875 scmutil.checkportable(ui, join(f))
871 scmutil.checkportable(ui, join(f))
876 p = self._repo.wjoin(f)
872 p = self._repo.wjoin(f)
877 try:
873 try:
878 st = os.lstat(p)
874 st = os.lstat(p)
879 except OSError:
875 except OSError:
880 ui.warn(_("%s does not exist!\n") % join(f))
876 ui.warn(_("%s does not exist!\n") % join(f))
881 rejected.append(f)
877 rejected.append(f)
882 continue
878 continue
883 if st.st_size > 10000000:
879 if st.st_size > 10000000:
884 ui.warn(_("%s: up to %d MB of RAM may be required "
880 ui.warn(_("%s: up to %d MB of RAM may be required "
885 "to manage this file\n"
881 "to manage this file\n"
886 "(use 'hg revert %s' to cancel the "
882 "(use 'hg revert %s' to cancel the "
887 "pending addition)\n")
883 "pending addition)\n")
888 % (f, 3 * st.st_size // 1000000, join(f)))
884 % (f, 3 * st.st_size // 1000000, join(f)))
889 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
885 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
890 ui.warn(_("%s not added: only files and symlinks "
886 ui.warn(_("%s not added: only files and symlinks "
891 "supported currently\n") % join(f))
887 "supported currently\n") % join(f))
892 rejected.append(p)
888 rejected.append(p)
893 elif ds[f] in 'amn':
889 elif ds[f] in 'amn':
894 ui.warn(_("%s already tracked!\n") % join(f))
890 ui.warn(_("%s already tracked!\n") % join(f))
895 elif ds[f] == 'r':
891 elif ds[f] == 'r':
896 ds.normallookup(f)
892 ds.normallookup(f)
897 else:
893 else:
898 ds.add(f)
894 ds.add(f)
899 return rejected
895 return rejected
900 finally:
896 finally:
901 wlock.release()
897 wlock.release()
902
898
903 def forget(self, files, prefix=""):
899 def forget(self, files, prefix=""):
904 join = lambda f: os.path.join(prefix, f)
900 join = lambda f: os.path.join(prefix, f)
905 wlock = self._repo.wlock()
901 wlock = self._repo.wlock()
906 try:
902 try:
907 rejected = []
903 rejected = []
908 for f in files:
904 for f in files:
909 if self._repo.dirstate[f] != 'a':
905 if self._repo.dirstate[f] != 'a':
910 self._repo.dirstate.remove(f)
906 self._repo.dirstate.remove(f)
911 elif f not in self._repo.dirstate:
907 elif f not in self._repo.dirstate:
912 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
908 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
913 rejected.append(f)
909 rejected.append(f)
914 else:
910 else:
915 self._repo.dirstate.drop(f)
911 self._repo.dirstate.drop(f)
916 return rejected
912 return rejected
917 finally:
913 finally:
918 wlock.release()
914 wlock.release()
919
915
920 def ancestors(self):
916 def ancestors(self):
921 for a in self._repo.changelog.ancestors(
917 for a in self._repo.changelog.ancestors(
922 *[p.rev() for p in self._parents]):
918 *[p.rev() for p in self._parents]):
923 yield changectx(self._repo, a)
919 yield changectx(self._repo, a)
924
920
925 def undelete(self, list):
921 def undelete(self, list):
926 pctxs = self.parents()
922 pctxs = self.parents()
927 wlock = self._repo.wlock()
923 wlock = self._repo.wlock()
928 try:
924 try:
929 for f in list:
925 for f in list:
930 if self._repo.dirstate[f] != 'r':
926 if self._repo.dirstate[f] != 'r':
931 self._repo.ui.warn(_("%s not removed!\n") % f)
927 self._repo.ui.warn(_("%s not removed!\n") % f)
932 else:
928 else:
933 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
929 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
934 t = fctx.data()
930 t = fctx.data()
935 self._repo.wwrite(f, t, fctx.flags())
931 self._repo.wwrite(f, t, fctx.flags())
936 self._repo.dirstate.normal(f)
932 self._repo.dirstate.normal(f)
937 finally:
933 finally:
938 wlock.release()
934 wlock.release()
939
935
940 def copy(self, source, dest):
936 def copy(self, source, dest):
941 p = self._repo.wjoin(dest)
937 p = self._repo.wjoin(dest)
942 if not os.path.lexists(p):
938 if not os.path.lexists(p):
943 self._repo.ui.warn(_("%s does not exist!\n") % dest)
939 self._repo.ui.warn(_("%s does not exist!\n") % dest)
944 elif not (os.path.isfile(p) or os.path.islink(p)):
940 elif not (os.path.isfile(p) or os.path.islink(p)):
945 self._repo.ui.warn(_("copy failed: %s is not a file or a "
941 self._repo.ui.warn(_("copy failed: %s is not a file or a "
946 "symbolic link\n") % dest)
942 "symbolic link\n") % dest)
947 else:
943 else:
948 wlock = self._repo.wlock()
944 wlock = self._repo.wlock()
949 try:
945 try:
950 if self._repo.dirstate[dest] in '?r':
946 if self._repo.dirstate[dest] in '?r':
951 self._repo.dirstate.add(dest)
947 self._repo.dirstate.add(dest)
952 self._repo.dirstate.copy(source, dest)
948 self._repo.dirstate.copy(source, dest)
953 finally:
949 finally:
954 wlock.release()
950 wlock.release()
955
951
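The workingctx methods above (modified/added/removed, dirty, add, forget, copy) are normally reached through repo[None], the same handle that update() further down grabs as wc = repo[None]. A minimal sketch of poking at that API, assuming the hg 2.x-era internal Python API shown in this file and a hypothetical repository path; the internal API is not a stable interface.

# Sketch only: inspect the working directory context. 'demo-repo' is a
# hypothetical local repository path; the internal API may change between
# Mercurial releases.
from mercurial import hg, ui as uimod

u = uimod.ui()
repo = hg.repository(u, 'demo-repo')
wc = repo[None]                    # working directory context

wc.status(unknown=True)            # populate status, as the asserts above require
print 'modified:', wc.modified()
print 'added:   ', wc.added()
print 'unknown: ', wc.unknown()
print 'dirty:   ', wc.dirty(missing=True)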
956 class workingfilectx(filectx):
952 class workingfilectx(filectx):
957 """A workingfilectx object makes access to data related to a particular
953 """A workingfilectx object makes access to data related to a particular
958 file in the working directory convenient."""
954 file in the working directory convenient."""
959 def __init__(self, repo, path, filelog=None, workingctx=None):
955 def __init__(self, repo, path, filelog=None, workingctx=None):
960 """path is the repository-relative file path; filelog and workingctx
956 """path is the repository-relative file path; filelog and workingctx
961 are optional and are computed lazily when not supplied."""
957 are optional and are computed lazily when not supplied."""
962 self._repo = repo
958 self._repo = repo
963 self._path = path
959 self._path = path
964 self._changeid = None
960 self._changeid = None
965 self._filerev = self._filenode = None
961 self._filerev = self._filenode = None
966
962
967 if filelog:
963 if filelog:
968 self._filelog = filelog
964 self._filelog = filelog
969 if workingctx:
965 if workingctx:
970 self._changectx = workingctx
966 self._changectx = workingctx
971
967
972 @propertycache
968 @propertycache
973 def _changectx(self):
969 def _changectx(self):
974 return workingctx(self._repo)
970 return workingctx(self._repo)
975
971
976 def __nonzero__(self):
972 def __nonzero__(self):
977 return True
973 return True
978
974
979 def __str__(self):
975 def __str__(self):
980 return "%s@%s" % (self.path(), self._changectx)
976 return "%s@%s" % (self.path(), self._changectx)
981
977
982 def __repr__(self):
978 def __repr__(self):
983 return "<workingfilectx %s>" % str(self)
979 return "<workingfilectx %s>" % str(self)
984
980
985 def data(self):
981 def data(self):
986 return self._repo.wread(self._path)
982 return self._repo.wread(self._path)
987 def renamed(self):
983 def renamed(self):
988 rp = self._repo.dirstate.copied(self._path)
984 rp = self._repo.dirstate.copied(self._path)
989 if not rp:
985 if not rp:
990 return None
986 return None
991 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
987 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
992
988
993 def parents(self):
989 def parents(self):
994 '''return parent filectxs, following copies if necessary'''
990 '''return parent filectxs, following copies if necessary'''
995 def filenode(ctx, path):
991 def filenode(ctx, path):
996 return ctx._manifest.get(path, nullid)
992 return ctx._manifest.get(path, nullid)
997
993
998 path = self._path
994 path = self._path
999 fl = self._filelog
995 fl = self._filelog
1000 pcl = self._changectx._parents
996 pcl = self._changectx._parents
1001 renamed = self.renamed()
997 renamed = self.renamed()
1002
998
1003 if renamed:
999 if renamed:
1004 pl = [renamed + (None,)]
1000 pl = [renamed + (None,)]
1005 else:
1001 else:
1006 pl = [(path, filenode(pcl[0], path), fl)]
1002 pl = [(path, filenode(pcl[0], path), fl)]
1007
1003
1008 for pc in pcl[1:]:
1004 for pc in pcl[1:]:
1009 pl.append((path, filenode(pc, path), fl))
1005 pl.append((path, filenode(pc, path), fl))
1010
1006
1011 return [filectx(self._repo, p, fileid=n, filelog=l)
1007 return [filectx(self._repo, p, fileid=n, filelog=l)
1012 for p, n, l in pl if n != nullid]
1008 for p, n, l in pl if n != nullid]
1013
1009
1014 def children(self):
1010 def children(self):
1015 return []
1011 return []
1016
1012
1017 def size(self):
1013 def size(self):
1018 return os.lstat(self._repo.wjoin(self._path)).st_size
1014 return os.lstat(self._repo.wjoin(self._path)).st_size
1019 def date(self):
1015 def date(self):
1020 t, tz = self._changectx.date()
1016 t, tz = self._changectx.date()
1021 try:
1017 try:
1022 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
1018 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
1023 except OSError, err:
1019 except OSError, err:
1024 if err.errno != errno.ENOENT:
1020 if err.errno != errno.ENOENT:
1025 raise
1021 raise
1026 return (t, tz)
1022 return (t, tz)
1027
1023
1028 def cmp(self, fctx):
1024 def cmp(self, fctx):
1029 """compare with other file context
1025 """compare with other file context
1030
1026
1031 returns True if different than fctx.
1027 returns True if different than fctx.
1032 """
1028 """
1033 # fctx should be a filectx (not a wfctx)
1029 # fctx should be a filectx (not a wfctx)
1034 # invert comparison to reuse the same code path
1030 # invert comparison to reuse the same code path
1035 return fctx.cmp(self)
1031 return fctx.cmp(self)
1036
1032
1037 class memctx(object):
1033 class memctx(object):
1038 """Use memctx to perform in-memory commits via localrepo.commitctx().
1034 """Use memctx to perform in-memory commits via localrepo.commitctx().
1039
1035
1040 Revision information is supplied at initialization time, while the
1036 Revision information is supplied at initialization time, while the
1041 related file data is made available through a callback
1037 related file data is made available through a callback
1042 mechanism. 'repo' is the current localrepo, 'parents' is a
1038 mechanism. 'repo' is the current localrepo, 'parents' is a
1043 sequence of two parent revisions identifiers (pass None for every
1039 sequence of two parent revisions identifiers (pass None for every
1044 missing parent), 'text' is the commit message and 'files' lists
1040 missing parent), 'text' is the commit message and 'files' lists
1045 names of files touched by the revision (normalized and relative to
1041 names of files touched by the revision (normalized and relative to
1046 repository root).
1042 repository root).
1047
1043
1048 filectxfn(repo, memctx, path) is a callable receiving the
1044 filectxfn(repo, memctx, path) is a callable receiving the
1049 repository, the current memctx object and the normalized path of
1045 repository, the current memctx object and the normalized path of
1050 the requested file, relative to the repository root. It is fired by
1046 the requested file, relative to the repository root. It is fired by
1051 the commit function for every file in 'files', but the call order is
1047 the commit function for every file in 'files', but the call order is
1052 undefined. If the file is available in the revision being
1048 undefined. If the file is available in the revision being
1053 committed (updated or added), filectxfn returns a memfilectx
1049 committed (updated or added), filectxfn returns a memfilectx
1054 object. If the file was removed, filectxfn raises an
1050 object. If the file was removed, filectxfn raises an
1055 IOError. Moved files are represented by marking the source file
1051 IOError. Moved files are represented by marking the source file
1056 removed and the new file added with copy information (see
1052 removed and the new file added with copy information (see
1057 memfilectx).
1053 memfilectx).
1058
1054
1059 user receives the committer name and defaults to current
1055 user receives the committer name and defaults to current
1060 repository username, date is the commit date in any format
1056 repository username, date is the commit date in any format
1061 supported by util.parsedate() and defaults to current date, extra
1057 supported by util.parsedate() and defaults to current date, extra
1062 is a dictionary of metadata or is left empty.
1058 is a dictionary of metadata or is left empty.
1063 """
1059 """
1064 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1060 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1065 date=None, extra=None):
1061 date=None, extra=None):
1066 self._repo = repo
1062 self._repo = repo
1067 self._rev = None
1063 self._rev = None
1068 self._node = None
1064 self._node = None
1069 self._text = text
1065 self._text = text
1070 self._date = date and util.parsedate(date) or util.makedate()
1066 self._date = date and util.parsedate(date) or util.makedate()
1071 self._user = user
1067 self._user = user
1072 parents = [(p or nullid) for p in parents]
1068 parents = [(p or nullid) for p in parents]
1073 p1, p2 = parents
1069 p1, p2 = parents
1074 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1070 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1075 files = sorted(set(files))
1071 files = sorted(set(files))
1076 self._status = [files, [], [], [], []]
1072 self._status = [files, [], [], [], []]
1077 self._filectxfn = filectxfn
1073 self._filectxfn = filectxfn
1078
1074
1079 self._extra = extra and extra.copy() or {}
1075 self._extra = extra and extra.copy() or {}
1080 if self._extra.get('branch', '') == '':
1076 if self._extra.get('branch', '') == '':
1081 self._extra['branch'] = 'default'
1077 self._extra['branch'] = 'default'
1082
1078
1083 def __str__(self):
1079 def __str__(self):
1084 return str(self._parents[0]) + "+"
1080 return str(self._parents[0]) + "+"
1085
1081
1086 def __int__(self):
1082 def __int__(self):
1087 return self._rev
1083 return self._rev
1088
1084
1089 def __nonzero__(self):
1085 def __nonzero__(self):
1090 return True
1086 return True
1091
1087
1092 def __getitem__(self, key):
1088 def __getitem__(self, key):
1093 return self.filectx(key)
1089 return self.filectx(key)
1094
1090
1095 def p1(self):
1091 def p1(self):
1096 return self._parents[0]
1092 return self._parents[0]
1097 def p2(self):
1093 def p2(self):
1098 return self._parents[1]
1094 return self._parents[1]
1099
1095
1100 def user(self):
1096 def user(self):
1101 return self._user or self._repo.ui.username()
1097 return self._user or self._repo.ui.username()
1102 def date(self):
1098 def date(self):
1103 return self._date
1099 return self._date
1104 def description(self):
1100 def description(self):
1105 return self._text
1101 return self._text
1106 def files(self):
1102 def files(self):
1107 return self.modified()
1103 return self.modified()
1108 def modified(self):
1104 def modified(self):
1109 return self._status[0]
1105 return self._status[0]
1110 def added(self):
1106 def added(self):
1111 return self._status[1]
1107 return self._status[1]
1112 def removed(self):
1108 def removed(self):
1113 return self._status[2]
1109 return self._status[2]
1114 def deleted(self):
1110 def deleted(self):
1115 return self._status[3]
1111 return self._status[3]
1116 def unknown(self):
1112 def unknown(self):
1117 return self._status[4]
1113 return self._status[4]
1118 def ignored(self):
1114 def ignored(self):
1119 return self._status[5]
1115 return self._status[5]
1120 def clean(self):
1116 def clean(self):
1121 return self._status[6]
1117 return self._status[6]
1122 def branch(self):
1118 def branch(self):
1123 return encoding.tolocal(self._extra['branch'])
1119 return encoding.tolocal(self._extra['branch'])
1124 def extra(self):
1120 def extra(self):
1125 return self._extra
1121 return self._extra
1126 def flags(self, f):
1122 def flags(self, f):
1127 return self[f].flags()
1123 return self[f].flags()
1128
1124
1129 def parents(self):
1125 def parents(self):
1130 """return contexts for each parent changeset"""
1126 """return contexts for each parent changeset"""
1131 return self._parents
1127 return self._parents
1132
1128
1133 def filectx(self, path, filelog=None):
1129 def filectx(self, path, filelog=None):
1134 """get a file context from the working directory"""
1130 """get a file context from the working directory"""
1135 return self._filectxfn(self._repo, self, path)
1131 return self._filectxfn(self._repo, self, path)
1136
1132
1137 def commit(self):
1133 def commit(self):
1138 """commit context to the repo"""
1134 """commit context to the repo"""
1139 return self._repo.commitctx(self)
1135 return self._repo.commitctx(self)
1140
1136
1141 class memfilectx(object):
1137 class memfilectx(object):
1142 """memfilectx represents an in-memory file to commit.
1138 """memfilectx represents an in-memory file to commit.
1143
1139
1144 See memctx for more details.
1140 See memctx for more details.
1145 """
1141 """
1146 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1142 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1147 """
1143 """
1148 path is the normalized file path relative to repository root.
1144 path is the normalized file path relative to repository root.
1149 data is the file content as a string.
1145 data is the file content as a string.
1150 islink is True if the file is a symbolic link.
1146 islink is True if the file is a symbolic link.
1151 isexec is True if the file is executable.
1147 isexec is True if the file is executable.
1152 copied is the source file path if the current file was copied in the
1148 copied is the source file path if the current file was copied in the
1153 revision being committed, or None."""
1149 revision being committed, or None."""
1154 self._path = path
1150 self._path = path
1155 self._data = data
1151 self._data = data
1156 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1152 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1157 self._copied = None
1153 self._copied = None
1158 if copied:
1154 if copied:
1159 self._copied = (copied, nullid)
1155 self._copied = (copied, nullid)
1160
1156
1161 def __nonzero__(self):
1157 def __nonzero__(self):
1162 return True
1158 return True
1163 def __str__(self):
1159 def __str__(self):
1164 return "%s@%s" % (self.path(), self._changectx)
1160 return "%s@%s" % (self.path(), self._changectx)
1165 def path(self):
1161 def path(self):
1166 return self._path
1162 return self._path
1167 def data(self):
1163 def data(self):
1168 return self._data
1164 return self._data
1169 def flags(self):
1165 def flags(self):
1170 return self._flags
1166 return self._flags
1171 def isexec(self):
1167 def isexec(self):
1172 return 'x' in self._flags
1168 return 'x' in self._flags
1173 def islink(self):
1169 def islink(self):
1174 return 'l' in self._flags
1170 return 'l' in self._flags
1175 def renamed(self):
1171 def renamed(self):
1176 return self._copied
1172 return self._copied
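The memctx docstring above spells out the in-memory commit protocol; the sketch below ties memctx and memfilectx together. It is illustrative only: the repository path, file name and commit text are made up, and the constructor signatures are the hg 2.x-era ones shown in this file.

# Sketch only: an in-memory commit via the memctx/memfilectx API above.
# 'demo-repo', 'hello.txt' and the commit text are hypothetical.
from mercurial import hg, context, ui as uimod

u = uimod.ui()
repo = hg.repository(u, 'demo-repo')

def filectxfn(repo, mctx, path):
    # called once per entry in 'files'; return a memfilectx for files present
    # in the new revision (raising IOError instead marks the file as removed)
    return context.memfilectx(path, 'contents of %s\n' % path)

parents = [repo['tip'].node(), None]        # None stands in for a missing parent
mctx = context.memctx(repo, parents, 'in-memory commit demo', ['hello.txt'],
                      filectxfn, user='demo <demo@example.com>')
node = mctx.commit()                        # equivalent to repo.commitctx(mctx)
print repo[node]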
@@ -1,589 +1,594 @@
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, hex, bin
8 from node import nullid, nullrev, hex, bin
9 from i18n import _
9 from i18n import _
10 import scmutil, util, filemerge, copies, subrepo
10 import scmutil, util, filemerge, copies, subrepo
11 import errno, os, shutil
11 import errno, os, shutil
12
12
13 class mergestate(object):
13 class mergestate(object):
14 '''track 3-way merge state of individual files'''
14 '''track 3-way merge state of individual files'''
15 def __init__(self, repo):
15 def __init__(self, repo):
16 self._repo = repo
16 self._repo = repo
17 self._dirty = False
17 self._dirty = False
18 self._read()
18 self._read()
19 def reset(self, node=None):
19 def reset(self, node=None):
20 self._state = {}
20 self._state = {}
21 if node:
21 if node:
22 self._local = node
22 self._local = node
23 shutil.rmtree(self._repo.join("merge"), True)
23 shutil.rmtree(self._repo.join("merge"), True)
24 self._dirty = False
24 self._dirty = False
25 def _read(self):
25 def _read(self):
26 self._state = {}
26 self._state = {}
27 try:
27 try:
28 f = self._repo.opener("merge/state")
28 f = self._repo.opener("merge/state")
29 for i, l in enumerate(f):
29 for i, l in enumerate(f):
30 if i == 0:
30 if i == 0:
31 self._local = bin(l[:-1])
31 self._local = bin(l[:-1])
32 else:
32 else:
33 bits = l[:-1].split("\0")
33 bits = l[:-1].split("\0")
34 self._state[bits[0]] = bits[1:]
34 self._state[bits[0]] = bits[1:]
35 f.close()
35 f.close()
36 except IOError, err:
36 except IOError, err:
37 if err.errno != errno.ENOENT:
37 if err.errno != errno.ENOENT:
38 raise
38 raise
39 self._dirty = False
39 self._dirty = False
40 def commit(self):
40 def commit(self):
41 if self._dirty:
41 if self._dirty:
42 f = self._repo.opener("merge/state", "w")
42 f = self._repo.opener("merge/state", "w")
43 f.write(hex(self._local) + "\n")
43 f.write(hex(self._local) + "\n")
44 for d, v in self._state.iteritems():
44 for d, v in self._state.iteritems():
45 f.write("\0".join([d] + v) + "\n")
45 f.write("\0".join([d] + v) + "\n")
46 f.close()
46 f.close()
47 self._dirty = False
47 self._dirty = False
48 def add(self, fcl, fco, fca, fd, flags):
48 def add(self, fcl, fco, fca, fd, flags):
49 hash = util.sha1(fcl.path()).hexdigest()
49 hash = util.sha1(fcl.path()).hexdigest()
50 self._repo.opener.write("merge/" + hash, fcl.data())
50 self._repo.opener.write("merge/" + hash, fcl.data())
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
52 hex(fca.filenode()), fco.path(), flags]
52 hex(fca.filenode()), fco.path(), flags]
53 self._dirty = True
53 self._dirty = True
54 def __contains__(self, dfile):
54 def __contains__(self, dfile):
55 return dfile in self._state
55 return dfile in self._state
56 def __getitem__(self, dfile):
56 def __getitem__(self, dfile):
57 return self._state[dfile][0]
57 return self._state[dfile][0]
58 def __iter__(self):
58 def __iter__(self):
59 l = self._state.keys()
59 l = self._state.keys()
60 l.sort()
60 l.sort()
61 for f in l:
61 for f in l:
62 yield f
62 yield f
63 def mark(self, dfile, state):
63 def mark(self, dfile, state):
64 self._state[dfile][0] = state
64 self._state[dfile][0] = state
65 self._dirty = True
65 self._dirty = True
66 def resolve(self, dfile, wctx, octx):
66 def resolve(self, dfile, wctx, octx):
67 if self[dfile] == 'r':
67 if self[dfile] == 'r':
68 return 0
68 return 0
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
70 f = self._repo.opener("merge/" + hash)
70 f = self._repo.opener("merge/" + hash)
71 self._repo.wwrite(dfile, f.read(), flags)
71 self._repo.wwrite(dfile, f.read(), flags)
72 f.close()
72 f.close()
73 fcd = wctx[dfile]
73 fcd = wctx[dfile]
74 fco = octx[ofile]
74 fco = octx[ofile]
75 fca = self._repo.filectx(afile, fileid=anode)
75 fca = self._repo.filectx(afile, fileid=anode)
76 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
76 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
77 if r is None:
77 if r is None:
78 # no real conflict
78 # no real conflict
79 del self._state[dfile]
79 del self._state[dfile]
80 elif not r:
80 elif not r:
81 self.mark(dfile, 'r')
81 self.mark(dfile, 'r')
82 return r
82 return r
83
83
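mergestate persists its records in .hg/merge/state; _read() and commit() above define the format: the first line is the hex node of the local parent, and each following line is one NUL-separated record keyed by the destination file. A standalone parsing sketch (no Mercurial import needed; the path is just the conventional location):

# Sketch: read the merge/state file in the format written by mergestate.commit()
# above (first line: hex local node; then one NUL-separated record per file).
def readmergestate(path='.hg/merge/state'):
    local = None
    state = {}
    f = open(path)
    try:
        for i, line in enumerate(f):
            if i == 0:
                local = line[:-1]           # hex of the local parent node
            else:
                bits = line[:-1].split('\0')
                # record layout, per mergestate.add()/resolve():
                # dfile, state ('u' or 'r'), hash of the preserved local file,
                # local path, ancestor path, ancestor filenode (hex),
                # other path, flags
                state[bits[0]] = bits[1:]
    finally:
        f.close()
    return local, state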
84 def _checkunknownfile(repo, wctx, mctx, f):
84 def _checkunknownfile(repo, wctx, mctx, f):
85 return (not repo.dirstate._ignore(f)
85 return (not repo.dirstate._ignore(f)
86 and os.path.exists(repo.wjoin(f))
86 and os.path.exists(repo.wjoin(f))
87 and mctx[f].cmp(wctx[f]))
87 and mctx[f].cmp(wctx[f]))
88
88
89 def _checkunknown(repo, wctx, mctx):
89 def _checkunknown(repo, wctx, mctx):
90 "check for collisions between unknown files and files in mctx"
90 "check for collisions between unknown files and files in mctx"
91
91
92 error = False
92 error = False
93 for f in mctx:
93 for f in mctx:
94 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
94 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
95 error = True
95 error = True
96 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
96 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
97 if error:
97 if error:
98 raise util.Abort(_("untracked files in working directory differ "
98 raise util.Abort(_("untracked files in working directory differ "
99 "from files in requested revision"))
99 "from files in requested revision"))
100
100
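This changeset's point is visible right here: _checkunknownfile asks the dirstate's ignore matcher and the filesystem directly instead of relying on a precomputed wctx.unknown() list (which is also why the wc.status(unknown=True) priming call disappears from update() further down). A standalone sketch of the same check, with every name hypothetical:

# Sketch of the idea behind _checkunknownfile, outside Mercurial: an untracked,
# un-ignored file on disk that differs from the incoming version must not be
# silently clobbered by the update.
import os

def wouldclobber(wd, f, incomingdata, tracked, ignored):
    """wd: working dir path, f: repo-relative file, incomingdata: content from
    the target revision, tracked/ignored: sets of repo-relative paths."""
    p = os.path.join(wd, f)
    if f in tracked or f in ignored or not os.path.exists(p):
        return False
    fh = open(p, 'rb')
    try:
        return fh.read() != incomingdata    # differs from what update would write
    finally:
        fh.close()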
101 def _checkcollision(mctx, wctx):
101 def _checkcollision(mctx, wctx):
102 "check for case folding collisions in the destination context"
102 "check for case folding collisions in the destination context"
103 folded = {}
103 folded = {}
104 for fn in mctx:
104 for fn in mctx:
105 fold = util.normcase(fn)
105 fold = util.normcase(fn)
106 if fold in folded:
106 if fold in folded:
107 raise util.Abort(_("case-folding collision between %s and %s")
107 raise util.Abort(_("case-folding collision between %s and %s")
108 % (fn, folded[fold]))
108 % (fn, folded[fold]))
109 folded[fold] = fn
109 folded[fold] = fn
110
110
111 if wctx:
111 if wctx:
112 for fn in wctx:
112 for fn in wctx:
113 fold = util.normcase(fn)
113 fold = util.normcase(fn)
114 mfn = folded.get(fold, None)
114 mfn = folded.get(fold, None)
115 if mfn and (mfn != fn):
115 if mfn and (mfn != fn):
116 raise util.Abort(_("case-folding collision between %s and %s")
116 raise util.Abort(_("case-folding collision between %s and %s")
117 % (mfn, fn))
117 % (mfn, fn))
118
118
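_checkcollision above only needs a case-folded map of the destination manifest; the same idea works without Mercurial. util.normcase is approximated here by str.lower(), which is an assumption (on some platforms it normalizes more than case):

# Sketch of the case-folding collision check, with str.lower() standing in for
# util.normcase.
def checkcollision(files):
    folded = {}
    for fn in files:
        fold = fn.lower()
        if fold in folded:
            raise ValueError("case-folding collision between %s and %s"
                             % (fn, folded[fold]))
        folded[fold] = fn

checkcollision(['README', 'Makefile'])      # fine
checkcollision(['README', 'readme'])        # raises ValueError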
119 def _forgetremoved(wctx, mctx, branchmerge):
119 def _forgetremoved(wctx, mctx, branchmerge):
120 """
120 """
121 Forget removed files
121 Forget removed files
122
122
123 If we're jumping between revisions (as opposed to merging), and if
123 If we're jumping between revisions (as opposed to merging), and if
124 neither the working directory nor the target rev has the file,
124 neither the working directory nor the target rev has the file,
125 then we need to remove it from the dirstate, to prevent the
125 then we need to remove it from the dirstate, to prevent the
126 dirstate from listing the file when it is no longer in the
126 dirstate from listing the file when it is no longer in the
127 manifest.
127 manifest.
128
128
129 If we're merging, and the other revision has removed a file
129 If we're merging, and the other revision has removed a file
130 that is not present in the working directory, we need to mark it
130 that is not present in the working directory, we need to mark it
131 as removed.
131 as removed.
132 """
132 """
133
133
134 action = []
134 action = []
135 state = branchmerge and 'r' or 'f'
135 state = branchmerge and 'r' or 'f'
136 for f in wctx.deleted():
136 for f in wctx.deleted():
137 if f not in mctx:
137 if f not in mctx:
138 action.append((f, state))
138 action.append((f, state))
139
139
140 if not branchmerge:
140 if not branchmerge:
141 for f in wctx.removed():
141 for f in wctx.removed():
142 if f not in mctx:
142 if f not in mctx:
143 action.append((f, "f"))
143 action.append((f, "f"))
144
144
145 return action
145 return action
146
146
147 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
147 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
148 """
148 """
149 Merge p1 and p2 with ancestor pa and generate merge action list
149 Merge p1 and p2 with ancestor pa and generate merge action list
150
150
151 overwrite = whether we clobber working files
151 overwrite = whether we clobber working files
152 partial = function to filter file lists
152 partial = function to filter file lists
153 """
153 """
154
154
155 def fmerge(f, f2, fa):
155 def fmerge(f, f2, fa):
156 """merge flags"""
156 """merge flags"""
157 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
157 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
158 if m == n: # flags agree
158 if m == n: # flags agree
159 return m # unchanged
159 return m # unchanged
160 if m and n and not a: # flags set, don't agree, differ from parent
160 if m and n and not a: # flags set, don't agree, differ from parent
161 r = repo.ui.promptchoice(
161 r = repo.ui.promptchoice(
162 _(" conflicting flags for %s\n"
162 _(" conflicting flags for %s\n"
163 "(n)one, e(x)ec or sym(l)ink?") % f,
163 "(n)one, e(x)ec or sym(l)ink?") % f,
164 (_("&None"), _("E&xec"), _("Sym&link")), 0)
164 (_("&None"), _("E&xec"), _("Sym&link")), 0)
165 if r == 1:
165 if r == 1:
166 return "x" # Exec
166 return "x" # Exec
167 if r == 2:
167 if r == 2:
168 return "l" # Symlink
168 return "l" # Symlink
169 return ""
169 return ""
170 if m and m != a: # changed from a to m
170 if m and m != a: # changed from a to m
171 return m
171 return m
172 if n and n != a: # changed from a to n
172 if n and n != a: # changed from a to n
173 if n == 'l' or a == 'l':
173 if n == 'l' or a == 'l':
174 # can't automatically merge symlink flag change here, let
174 # can't automatically merge symlink flag change here, let
175 # filemerge take care of it
175 # filemerge take care of it
176 return m
176 return m
177 return n
177 return n
178 return '' # flag was cleared
178 return '' # flag was cleared
179
179
180 def act(msg, m, f, *args):
180 def act(msg, m, f, *args):
181 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
181 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
182 action.append((f, m) + args)
182 action.append((f, m) + args)
183
183
184 action, copy = [], {}
184 action, copy = [], {}
185
185
186 if overwrite:
186 if overwrite:
187 pa = p1
187 pa = p1
188 elif pa == p2: # backwards
188 elif pa == p2: # backwards
189 pa = p1.p1()
189 pa = p1.p1()
190 elif pa and repo.ui.configbool("merge", "followcopies", True):
190 elif pa and repo.ui.configbool("merge", "followcopies", True):
191 dirs = repo.ui.configbool("merge", "followdirs", True)
191 dirs = repo.ui.configbool("merge", "followdirs", True)
192 copy, diverge = copies.mergecopies(repo, p1, p2, pa, dirs)
192 copy, diverge = copies.mergecopies(repo, p1, p2, pa, dirs)
193 for of, fl in diverge.iteritems():
193 for of, fl in diverge.iteritems():
194 act("divergent renames", "dr", of, fl)
194 act("divergent renames", "dr", of, fl)
195
195
196 repo.ui.note(_("resolving manifests\n"))
196 repo.ui.note(_("resolving manifests\n"))
197 repo.ui.debug(" overwrite: %s, partial: %s\n"
197 repo.ui.debug(" overwrite: %s, partial: %s\n"
198 % (bool(overwrite), bool(partial)))
198 % (bool(overwrite), bool(partial)))
199 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, p1, p2))
199 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, p1, p2))
200
200
201 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
201 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
202 copied = set(copy.values())
202 copied = set(copy.values())
203
203
204 if '.hgsubstate' in m1:
204 if '.hgsubstate' in m1:
205 # check whether sub state is modified
205 # check whether sub state is modified
206 for s in p1.substate:
206 for s in p1.substate:
207 if p1.sub(s).dirty():
207 if p1.sub(s).dirty():
208 m1['.hgsubstate'] += "+"
208 m1['.hgsubstate'] += "+"
209 break
209 break
210
210
211 # Compare manifests
211 # Compare manifests
212 for f, n in m1.iteritems():
212 for f, n in m1.iteritems():
213 if partial and not partial(f):
213 if partial and not partial(f):
214 continue
214 continue
215 if f in m2:
215 if f in m2:
216 rflags = fmerge(f, f, f)
216 rflags = fmerge(f, f, f)
217 a = ma.get(f, nullid)
217 a = ma.get(f, nullid)
218 if n == m2[f] or m2[f] == a: # same or local newer
218 if n == m2[f] or m2[f] == a: # same or local newer
219 # is file locally modified or flags need changing?
219 # is file locally modified or flags need changing?
220 # dirstate flags may need to be made current
220 # dirstate flags may need to be made current
221 if m1.flags(f) != rflags or n[20:]:
221 if m1.flags(f) != rflags or n[20:]:
222 act("update permissions", "e", f, rflags)
222 act("update permissions", "e", f, rflags)
223 elif n == a: # remote newer
223 elif n == a: # remote newer
224 act("remote is newer", "g", f, rflags)
224 act("remote is newer", "g", f, rflags)
225 else: # both changed
225 else: # both changed
226 act("versions differ", "m", f, f, f, rflags, False)
226 act("versions differ", "m", f, f, f, rflags, False)
227 elif f in copied: # files we'll deal with on m2 side
227 elif f in copied: # files we'll deal with on m2 side
228 pass
228 pass
229 elif f in copy:
229 elif f in copy:
230 f2 = copy[f]
230 f2 = copy[f]
231 if f2 not in m2: # directory rename
231 if f2 not in m2: # directory rename
232 act("remote renamed directory to " + f2, "d",
232 act("remote renamed directory to " + f2, "d",
233 f, None, f2, m1.flags(f))
233 f, None, f2, m1.flags(f))
234 else: # case 2 A,B/B/B or case 4,21 A/B/B
234 else: # case 2 A,B/B/B or case 4,21 A/B/B
235 act("local copied/moved to " + f2, "m",
235 act("local copied/moved to " + f2, "m",
236 f, f2, f, fmerge(f, f2, f2), False)
236 f, f2, f, fmerge(f, f2, f2), False)
237 elif f in ma: # clean, a different, no remote
237 elif f in ma: # clean, a different, no remote
238 if n != ma[f]:
238 if n != ma[f]:
239 if repo.ui.promptchoice(
239 if repo.ui.promptchoice(
240 _(" local changed %s which remote deleted\n"
240 _(" local changed %s which remote deleted\n"
241 "use (c)hanged version or (d)elete?") % f,
241 "use (c)hanged version or (d)elete?") % f,
242 (_("&Changed"), _("&Delete")), 0):
242 (_("&Changed"), _("&Delete")), 0):
243 act("prompt delete", "r", f)
243 act("prompt delete", "r", f)
244 else:
244 else:
245 act("prompt keep", "a", f)
245 act("prompt keep", "a", f)
246 elif n[20:] == "a": # added, no remote
246 elif n[20:] == "a": # added, no remote
247 act("remote deleted", "f", f)
247 act("remote deleted", "f", f)
248 elif n[20:] != "u":
248 else:
249 act("other deleted", "r", f)
249 act("other deleted", "r", f)
250
250
251 for f, n in m2.iteritems():
251 for f, n in m2.iteritems():
252 if partial and not partial(f):
252 if partial and not partial(f):
253 continue
253 continue
254 if f in m1 or f in copied: # files already visited
254 if f in m1 or f in copied: # files already visited
255 continue
255 continue
256 if f in copy:
256 if f in copy:
257 f2 = copy[f]
257 f2 = copy[f]
258 if f2 not in m1: # directory rename
258 if f2 not in m1: # directory rename
259 act("local renamed directory to " + f2, "d",
259 act("local renamed directory to " + f2, "d",
260 None, f, f2, m2.flags(f))
260 None, f, f2, m2.flags(f))
261 elif f2 in m2: # rename case 1, A/A,B/A
261 elif f2 in m2: # rename case 1, A/A,B/A
262 act("remote copied to " + f, "m",
262 act("remote copied to " + f, "m",
263 f2, f, f, fmerge(f2, f, f2), False)
263 f2, f, f, fmerge(f2, f, f2), False)
264 else: # case 3,20 A/B/A
264 else: # case 3,20 A/B/A
265 act("remote moved to " + f, "m",
265 act("remote moved to " + f, "m",
266 f2, f, f, fmerge(f2, f, f2), True)
266 f2, f, f, fmerge(f2, f, f2), True)
267 elif f not in ma:
267 elif f not in ma:
268 act("remote created", "g", f, m2.flags(f))
268 if (not overwrite
269 and _checkunknownfile(repo, p1, p2, f)):
270 rflags = fmerge(f, f, f)
271 act("remote differs from untracked local",
272 "m", f, f, f, rflags, False)
273 else:
274 act("remote created", "g", f, m2.flags(f))
269 elif n != ma[f]:
275 elif n != ma[f]:
270 if repo.ui.promptchoice(
276 if repo.ui.promptchoice(
271 _("remote changed %s which local deleted\n"
277 _("remote changed %s which local deleted\n"
272 "use (c)hanged version or leave (d)eleted?") % f,
278 "use (c)hanged version or leave (d)eleted?") % f,
273 (_("&Changed"), _("&Deleted")), 0) == 0:
279 (_("&Changed"), _("&Deleted")), 0) == 0:
274 act("prompt recreating", "g", f, m2.flags(f))
280 act("prompt recreating", "g", f, m2.flags(f))
275
281
276 return action
282 return action
277
283
278 def actionkey(a):
284 def actionkey(a):
279 return a[1] == 'r' and -1 or 0, a
285 return a[1] == 'r' and -1 or 0, a
280
286
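manifestmerge returns a list of (file, action, ...) tuples ('g' get, 'm' merge, 'r' remove, 'e' exec-flag change, 'd' directory rename, 'dr' divergent rename, 'a' re-add, 'f' forget), and applyupdates sorts it with actionkey so removals happen first. A tiny illustration with made-up entries:

# Illustration: actionkey (above) sorts 'r' (remove) actions ahead of the rest.
# The file names and flags are made up; the tuple shapes match the act() calls
# in manifestmerge.
def actionkey(a):
    return a[1] == 'r' and -1 or 0, a

action = [
    ('b.txt', 'g', ''),                            # get from the other revision
    ('a.txt', 'm', 'a.txt', 'a.txt', '', False),   # 3-way merge
    ('old.txt', 'r'),                              # remove
    ('script.sh', 'e', 'x'),                       # update flags only
]
action.sort(key=actionkey)
print [a[:2] for a in action]
# [('old.txt', 'r'), ('a.txt', 'm'), ('b.txt', 'g'), ('script.sh', 'e')]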
281 def applyupdates(repo, action, wctx, mctx, actx, overwrite):
287 def applyupdates(repo, action, wctx, mctx, actx, overwrite):
282 """apply the merge action list to the working directory
288 """apply the merge action list to the working directory
283
289
284 wctx is the working copy context
290 wctx is the working copy context
285 mctx is the context to be merged into the working copy
291 mctx is the context to be merged into the working copy
286 actx is the context of the common ancestor
292 actx is the context of the common ancestor
287
293
288 Return a tuple of counts (updated, merged, removed, unresolved) that
294 Return a tuple of counts (updated, merged, removed, unresolved) that
289 describes how many files were affected by the update.
295 describes how many files were affected by the update.
290 """
296 """
291
297
292 updated, merged, removed, unresolved = 0, 0, 0, 0
298 updated, merged, removed, unresolved = 0, 0, 0, 0
293 ms = mergestate(repo)
299 ms = mergestate(repo)
294 ms.reset(wctx.p1().node())
300 ms.reset(wctx.p1().node())
295 moves = []
301 moves = []
296 action.sort(key=actionkey)
302 action.sort(key=actionkey)
297
303
298 # prescan for merges
304 # prescan for merges
299 for a in action:
305 for a in action:
300 f, m = a[:2]
306 f, m = a[:2]
301 if m == 'm': # merge
307 if m == 'm': # merge
302 f2, fd, flags, move = a[2:]
308 f2, fd, flags, move = a[2:]
303 if f == '.hgsubstate': # merged internally
309 if f == '.hgsubstate': # merged internally
304 continue
310 continue
305 repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
311 repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
306 fcl = wctx[f]
312 fcl = wctx[f]
307 fco = mctx[f2]
313 fco = mctx[f2]
308 if mctx == actx: # backwards, use working dir parent as ancestor
314 if mctx == actx: # backwards, use working dir parent as ancestor
309 if fcl.parents():
315 if fcl.parents():
310 fca = fcl.p1()
316 fca = fcl.p1()
311 else:
317 else:
312 fca = repo.filectx(f, fileid=nullrev)
318 fca = repo.filectx(f, fileid=nullrev)
313 else:
319 else:
314 fca = fcl.ancestor(fco, actx)
320 fca = fcl.ancestor(fco, actx)
315 if not fca:
321 if not fca:
316 fca = repo.filectx(f, fileid=nullrev)
322 fca = repo.filectx(f, fileid=nullrev)
317 ms.add(fcl, fco, fca, fd, flags)
323 ms.add(fcl, fco, fca, fd, flags)
318 if f != fd and move:
324 if f != fd and move:
319 moves.append(f)
325 moves.append(f)
320
326
321 audit = scmutil.pathauditor(repo.root)
327 audit = scmutil.pathauditor(repo.root)
322
328
323 # remove renamed files after safely stored
329 # remove renamed files after safely stored
324 for f in moves:
330 for f in moves:
325 if os.path.lexists(repo.wjoin(f)):
331 if os.path.lexists(repo.wjoin(f)):
326 repo.ui.debug("removing %s\n" % f)
332 repo.ui.debug("removing %s\n" % f)
327 audit(f)
333 audit(f)
328 os.unlink(repo.wjoin(f))
334 os.unlink(repo.wjoin(f))
329
335
330 numupdates = len(action)
336 numupdates = len(action)
331 for i, a in enumerate(action):
337 for i, a in enumerate(action):
332 f, m = a[:2]
338 f, m = a[:2]
333 repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates,
339 repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates,
334 unit=_('files'))
340 unit=_('files'))
335 if f and f[0] == "/":
341 if f and f[0] == "/":
336 continue
342 continue
337 if m == "r": # remove
343 if m == "r": # remove
338 repo.ui.note(_("removing %s\n") % f)
344 repo.ui.note(_("removing %s\n") % f)
339 audit(f)
345 audit(f)
340 if f == '.hgsubstate': # subrepo states need updating
346 if f == '.hgsubstate': # subrepo states need updating
341 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
347 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
342 try:
348 try:
343 util.unlinkpath(repo.wjoin(f))
349 util.unlinkpath(repo.wjoin(f))
344 except OSError, inst:
350 except OSError, inst:
345 if inst.errno != errno.ENOENT:
351 if inst.errno != errno.ENOENT:
346 repo.ui.warn(_("update failed to remove %s: %s!\n") %
352 repo.ui.warn(_("update failed to remove %s: %s!\n") %
347 (f, inst.strerror))
353 (f, inst.strerror))
348 removed += 1
354 removed += 1
349 elif m == "m": # merge
355 elif m == "m": # merge
350 if f == '.hgsubstate': # subrepo states need updating
356 if f == '.hgsubstate': # subrepo states need updating
351 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
357 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
352 continue
358 continue
353 f2, fd, flags, move = a[2:]
359 f2, fd, flags, move = a[2:]
354 repo.wopener.audit(fd)
360 repo.wopener.audit(fd)
355 r = ms.resolve(fd, wctx, mctx)
361 r = ms.resolve(fd, wctx, mctx)
356 if r is not None and r > 0:
362 if r is not None and r > 0:
357 unresolved += 1
363 unresolved += 1
358 else:
364 else:
359 if r is None:
365 if r is None:
360 updated += 1
366 updated += 1
361 else:
367 else:
362 merged += 1
368 merged += 1
363 if (move and repo.dirstate.normalize(fd) != f
369 if (move and repo.dirstate.normalize(fd) != f
364 and os.path.lexists(repo.wjoin(f))):
370 and os.path.lexists(repo.wjoin(f))):
365 repo.ui.debug("removing %s\n" % f)
371 repo.ui.debug("removing %s\n" % f)
366 audit(f)
372 audit(f)
367 os.unlink(repo.wjoin(f))
373 os.unlink(repo.wjoin(f))
368 elif m == "g": # get
374 elif m == "g": # get
369 flags = a[2]
375 flags = a[2]
370 repo.ui.note(_("getting %s\n") % f)
376 repo.ui.note(_("getting %s\n") % f)
371 t = mctx.filectx(f).data()
377 t = mctx.filectx(f).data()
372 repo.wwrite(f, t, flags)
378 repo.wwrite(f, t, flags)
373 t = None
379 t = None
374 updated += 1
380 updated += 1
375 if f == '.hgsubstate': # subrepo states need updating
381 if f == '.hgsubstate': # subrepo states need updating
376 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
382 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
377 elif m == "d": # directory rename
383 elif m == "d": # directory rename
378 f2, fd, flags = a[2:]
384 f2, fd, flags = a[2:]
379 if f:
385 if f:
380 repo.ui.note(_("moving %s to %s\n") % (f, fd))
386 repo.ui.note(_("moving %s to %s\n") % (f, fd))
381 audit(f)
387 audit(f)
382 t = wctx.filectx(f).data()
388 t = wctx.filectx(f).data()
383 repo.wwrite(fd, t, flags)
389 repo.wwrite(fd, t, flags)
384 util.unlinkpath(repo.wjoin(f))
390 util.unlinkpath(repo.wjoin(f))
385 if f2:
391 if f2:
386 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
392 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
387 t = mctx.filectx(f2).data()
393 t = mctx.filectx(f2).data()
388 repo.wwrite(fd, t, flags)
394 repo.wwrite(fd, t, flags)
389 updated += 1
395 updated += 1
390 elif m == "dr": # divergent renames
396 elif m == "dr": # divergent renames
391 fl = a[2]
397 fl = a[2]
392 repo.ui.warn(_("note: possible conflict - %s was renamed "
398 repo.ui.warn(_("note: possible conflict - %s was renamed "
393 "multiple times to:\n") % f)
399 "multiple times to:\n") % f)
394 for nf in fl:
400 for nf in fl:
395 repo.ui.warn(" %s\n" % nf)
401 repo.ui.warn(" %s\n" % nf)
396 elif m == "e": # exec
402 elif m == "e": # exec
397 flags = a[2]
403 flags = a[2]
398 repo.wopener.audit(f)
404 repo.wopener.audit(f)
399 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
405 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
400 ms.commit()
406 ms.commit()
401 repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files'))
407 repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files'))
402
408
403 return updated, merged, removed, unresolved
409 return updated, merged, removed, unresolved
404
410
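applyupdates hands back the (updated, merged, removed, unresolved) counters that update() returns to its callers; commands turn this tuple into the familiar one-line summary. A small formatting sketch (the wording mirrors hg's usual summary but is reconstructed here, not taken from this file):

# Sketch: format the stats tuple returned by applyupdates()/update(). The exact
# message Mercurial prints lives elsewhere; this is only an illustration.
def summarize(stats):
    updated, merged, removed, unresolved = stats
    return ("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved"
            % (updated, merged, removed, unresolved))

print summarize((3, 1, 0, 0))
# 3 files updated, 1 files merged, 0 files removed, 0 files unresolved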
405 def recordupdates(repo, action, branchmerge):
411 def recordupdates(repo, action, branchmerge):
406 "record merge actions to the dirstate"
412 "record merge actions to the dirstate"
407
413
408 for a in action:
414 for a in action:
409 f, m = a[:2]
415 f, m = a[:2]
410 if m == "r": # remove
416 if m == "r": # remove
411 if branchmerge:
417 if branchmerge:
412 repo.dirstate.remove(f)
418 repo.dirstate.remove(f)
413 else:
419 else:
414 repo.dirstate.drop(f)
420 repo.dirstate.drop(f)
415 elif m == "a": # re-add
421 elif m == "a": # re-add
416 if not branchmerge:
422 if not branchmerge:
417 repo.dirstate.add(f)
423 repo.dirstate.add(f)
418 elif m == "f": # forget
424 elif m == "f": # forget
419 repo.dirstate.drop(f)
425 repo.dirstate.drop(f)
420 elif m == "e": # exec change
426 elif m == "e": # exec change
421 repo.dirstate.normallookup(f)
427 repo.dirstate.normallookup(f)
422 elif m == "g": # get
428 elif m == "g": # get
423 if branchmerge:
429 if branchmerge:
424 repo.dirstate.otherparent(f)
430 repo.dirstate.otherparent(f)
425 else:
431 else:
426 repo.dirstate.normal(f)
432 repo.dirstate.normal(f)
427 elif m == "m": # merge
433 elif m == "m": # merge
428 f2, fd, flag, move = a[2:]
434 f2, fd, flag, move = a[2:]
429 if branchmerge:
435 if branchmerge:
430 # We've done a branch merge, mark this file as merged
436 # We've done a branch merge, mark this file as merged
431 # so that we properly record the merger later
437 # so that we properly record the merger later
432 repo.dirstate.merge(fd)
438 repo.dirstate.merge(fd)
433 if f != f2: # copy/rename
439 if f != f2: # copy/rename
434 if move:
440 if move:
435 repo.dirstate.remove(f)
441 repo.dirstate.remove(f)
436 if f != fd:
442 if f != fd:
437 repo.dirstate.copy(f, fd)
443 repo.dirstate.copy(f, fd)
438 else:
444 else:
439 repo.dirstate.copy(f2, fd)
445 repo.dirstate.copy(f2, fd)
440 else:
446 else:
441 # We've update-merged a locally modified file, so
447 # We've update-merged a locally modified file, so
442 # we set the dirstate to emulate a normal checkout
448 # we set the dirstate to emulate a normal checkout
443 # of that file some time in the past. Thus our
449 # of that file some time in the past. Thus our
444 # merge will appear as a normal local file
450 # merge will appear as a normal local file
445 # modification.
451 # modification.
446 if f2 == fd: # file not locally copied/moved
452 if f2 == fd: # file not locally copied/moved
447 repo.dirstate.normallookup(fd)
453 repo.dirstate.normallookup(fd)
448 if move:
454 if move:
449 repo.dirstate.drop(f)
455 repo.dirstate.drop(f)
450 elif m == "d": # directory rename
456 elif m == "d": # directory rename
451 f2, fd, flag = a[2:]
457 f2, fd, flag = a[2:]
452 if not f2 and f not in repo.dirstate:
458 if not f2 and f not in repo.dirstate:
453 # untracked file moved
459 # untracked file moved
454 continue
460 continue
455 if branchmerge:
461 if branchmerge:
456 repo.dirstate.add(fd)
462 repo.dirstate.add(fd)
457 if f:
463 if f:
458 repo.dirstate.remove(f)
464 repo.dirstate.remove(f)
459 repo.dirstate.copy(f, fd)
465 repo.dirstate.copy(f, fd)
460 if f2:
466 if f2:
461 repo.dirstate.copy(f2, fd)
467 repo.dirstate.copy(f2, fd)
462 else:
468 else:
463 repo.dirstate.normal(fd)
469 repo.dirstate.normal(fd)
464 if f:
470 if f:
465 repo.dirstate.drop(f)
471 repo.dirstate.drop(f)
466
472
467 def update(repo, node, branchmerge, force, partial, ancestor=None):
473 def update(repo, node, branchmerge, force, partial, ancestor=None):
468 """
474 """
469 Perform a merge between the working directory and the given node
475 Perform a merge between the working directory and the given node
470
476
471 node = the node to update to, or None if unspecified
477 node = the node to update to, or None if unspecified
472 branchmerge = whether to merge between branches
478 branchmerge = whether to merge between branches
473 force = whether to force branch merging or file overwriting
479 force = whether to force branch merging or file overwriting
474 partial = a function to filter file lists (dirstate not updated)
480 partial = a function to filter file lists (dirstate not updated)
475
481
476 The table below shows all the behaviors of the update command
482 The table below shows all the behaviors of the update command
477 given the -c and -C or no options, whether the working directory
483 given the -c and -C or no options, whether the working directory
478 is dirty, whether a revision is specified, and the relationship of
484 is dirty, whether a revision is specified, and the relationship of
479 the parent rev to the target rev (linear, on the same named
485 the parent rev to the target rev (linear, on the same named
480 branch, or on another named branch).
486 branch, or on another named branch).
481
487
482 This logic is tested by test-update-branches.t.
488 This logic is tested by test-update-branches.t.
483
489
484 -c -C dirty rev | linear same cross
490 -c -C dirty rev | linear same cross
485 n n n n | ok (1) x
491 n n n n | ok (1) x
486 n n n y | ok ok ok
492 n n n y | ok ok ok
487 n n y * | merge (2) (2)
493 n n y * | merge (2) (2)
488 n y * * | --- discard ---
494 n y * * | --- discard ---
489 y n y * | --- (3) ---
495 y n y * | --- (3) ---
490 y n n * | --- ok ---
496 y n n * | --- ok ---
491 y y * * | --- (4) ---
497 y y * * | --- (4) ---
492
498
493 x = can't happen
499 x = can't happen
494 * = don't-care
500 * = don't-care
495 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
501 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
496 2 = abort: crosses branches (use 'hg merge' to merge or
502 2 = abort: crosses branches (use 'hg merge' to merge or
497 use 'hg update -C' to discard changes)
503 use 'hg update -C' to discard changes)
498 3 = abort: uncommitted local changes
504 3 = abort: uncommitted local changes
499 4 = incompatible options (checked in commands.py)
505 4 = incompatible options (checked in commands.py)
500
506
501 Return the same tuple as applyupdates().
507 Return the same tuple as applyupdates().
502 """
508 """
503
509
504 onode = node
510 onode = node
505 wlock = repo.wlock()
511 wlock = repo.wlock()
506 try:
512 try:
507 wc = repo[None]
513 wc = repo[None]
508 if node is None:
514 if node is None:
509 # tip of current branch
515 # tip of current branch
510 try:
516 try:
511 node = repo.branchtags()[wc.branch()]
517 node = repo.branchtags()[wc.branch()]
512 except KeyError:
518 except KeyError:
513 if wc.branch() == "default": # no default branch!
519 if wc.branch() == "default": # no default branch!
514 node = repo.lookup("tip") # update to tip
520 node = repo.lookup("tip") # update to tip
515 else:
521 else:
516 raise util.Abort(_("branch %s not found") % wc.branch())
522 raise util.Abort(_("branch %s not found") % wc.branch())
517 overwrite = force and not branchmerge
523 overwrite = force and not branchmerge
518 pl = wc.parents()
524 pl = wc.parents()
519 p1, p2 = pl[0], repo[node]
525 p1, p2 = pl[0], repo[node]
520 if ancestor:
526 if ancestor:
521 pa = repo[ancestor]
527 pa = repo[ancestor]
522 else:
528 else:
523 pa = p1.ancestor(p2)
529 pa = p1.ancestor(p2)
524
530
525 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
531 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
526
532
527 ### check phase
533 ### check phase
528 if not overwrite and len(pl) > 1:
534 if not overwrite and len(pl) > 1:
529 raise util.Abort(_("outstanding uncommitted merges"))
535 raise util.Abort(_("outstanding uncommitted merges"))
530 if branchmerge:
536 if branchmerge:
531 if pa == p2:
537 if pa == p2:
532 raise util.Abort(_("merging with a working directory ancestor"
538 raise util.Abort(_("merging with a working directory ancestor"
533 " has no effect"))
539 " has no effect"))
534 elif pa == p1:
540 elif pa == p1:
535 if p1.branch() == p2.branch():
541 if p1.branch() == p2.branch():
536 raise util.Abort(_("nothing to merge"),
542 raise util.Abort(_("nothing to merge"),
537 hint=_("use 'hg update' "
543 hint=_("use 'hg update' "
538 "or check 'hg heads'"))
544 "or check 'hg heads'"))
539 if not force and (wc.files() or wc.deleted()):
545 if not force and (wc.files() or wc.deleted()):
540 raise util.Abort(_("outstanding uncommitted changes"),
546 raise util.Abort(_("outstanding uncommitted changes"),
541 hint=_("use 'hg status' to list changes"))
547 hint=_("use 'hg status' to list changes"))
542 for s in wc.substate:
548 for s in wc.substate:
543 if wc.sub(s).dirty():
549 if wc.sub(s).dirty():
544 raise util.Abort(_("outstanding uncommitted changes in "
550 raise util.Abort(_("outstanding uncommitted changes in "
545 "subrepository '%s'") % s)
551 "subrepository '%s'") % s)
546
552
547 elif not overwrite:
553 elif not overwrite:
548 if pa == p1 or pa == p2: # linear
554 if pa == p1 or pa == p2: # linear
549 pass # all good
555 pass # all good
550 elif wc.dirty(missing=True):
556 elif wc.dirty(missing=True):
551 raise util.Abort(_("crosses branches (merge branches or use"
557 raise util.Abort(_("crosses branches (merge branches or use"
552 " --clean to discard changes)"))
558 " --clean to discard changes)"))
553 elif onode is None:
559 elif onode is None:
554 raise util.Abort(_("crosses branches (merge branches or update"
560 raise util.Abort(_("crosses branches (merge branches or update"
555 " --check to force update)"))
561 " --check to force update)"))
556 else:
562 else:
557 # Allow jumping branches if clean and specific rev given
563 # Allow jumping branches if clean and specific rev given
558 pa = p1
564 pa = p1
559
565
560 ### calculate phase
566 ### calculate phase
561 action = []
567 action = []
562 wc.status(unknown=True) # prime cache
563 folding = not util.checkcase(repo.path)
568 folding = not util.checkcase(repo.path)
564 if not force:
569 if not force:
565 _checkunknown(repo, wc, p2)
570 _checkunknown(repo, wc, p2)
566 if folding:
571 if folding:
567 _checkcollision(p2, branchmerge and p1)
572 _checkcollision(p2, branchmerge and p1)
568 action += _forgetremoved(wc, p2, branchmerge)
573 action += _forgetremoved(wc, p2, branchmerge)
569 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
574 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
570
575
571 ### apply phase
576 ### apply phase
572 if not branchmerge: # just jump to the new rev
577 if not branchmerge: # just jump to the new rev
573 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
578 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
574 if not partial:
579 if not partial:
575 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
580 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
576
581
577 stats = applyupdates(repo, action, wc, p2, pa, overwrite)
582 stats = applyupdates(repo, action, wc, p2, pa, overwrite)
578
583
579 if not partial:
584 if not partial:
580 repo.dirstate.setparents(fp1, fp2)
585 repo.dirstate.setparents(fp1, fp2)
581 recordupdates(repo, action, branchmerge)
586 recordupdates(repo, action, branchmerge)
582 if not branchmerge:
587 if not branchmerge:
583 repo.dirstate.setbranch(p2.branch())
588 repo.dirstate.setbranch(p2.branch())
584 finally:
589 finally:
585 wlock.release()
590 wlock.release()
586
591
587 if not partial:
592 if not partial:
588 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
593 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
589 return stats
594 return stats
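update() is the single entry point behind both plain 'hg update' and the working-directory half of 'hg merge'; node, branchmerge and force select the rows of the table in its docstring. A hedged sketch of driving it directly, assuming the hg 2.x-era internal API above and a hypothetical repository path (real callers normally go through the hg module's higher-level wrappers):

# Sketch only: call merge.update() directly, per the signature and docstring
# above. 'demo-repo' is hypothetical and the internal API is not stable.
from mercurial import hg, merge, ui as uimod

u = uimod.ui()
repo = hg.repository(u, 'demo-repo')

# plain update to the tip of the current branch (node=None), no branch merge
stats = merge.update(repo, None, branchmerge=False, force=False, partial=None)
print 'updated %d, merged %d, removed %d, unresolved %d' % stats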
@@ -1,279 +1,274 @@
1 Create a repo with some stuff in it:
1 Create a repo with some stuff in it:
2
2
3 $ hg init a
3 $ hg init a
4 $ cd a
4 $ cd a
5 $ echo a > a
5 $ echo a > a
6 $ echo a > d
6 $ echo a > d
7 $ echo a > e
7 $ echo a > e
8 $ hg ci -qAm0
8 $ hg ci -qAm0
9 $ echo b > a
9 $ echo b > a
10 $ hg ci -m1 -u bar
10 $ hg ci -m1 -u bar
11 $ hg mv a b
11 $ hg mv a b
12 $ hg ci -m2
12 $ hg ci -m2
13 $ hg cp b c
13 $ hg cp b c
14 $ hg ci -m3 -u baz
14 $ hg ci -m3 -u baz
15 $ echo b > d
15 $ echo b > d
16 $ echo f > e
16 $ echo f > e
17 $ hg ci -m4
17 $ hg ci -m4
18 $ hg up -q 3
18 $ hg up -q 3
19 $ echo b > e
19 $ echo b > e
20 $ hg branch -q stable
20 $ hg branch -q stable
21 $ hg ci -m5
21 $ hg ci -m5
22 $ hg merge -q default --tool internal:local
22 $ hg merge -q default --tool internal:local
23 $ hg branch -q default
23 $ hg branch -q default
24 $ hg ci -m6
24 $ hg ci -m6
25 $ hg phase --public 3
25 $ hg phase --public 3
26 $ hg phase --force --secret 6
26 $ hg phase --force --secret 6
27
27
28 $ hg --config extensions.graphlog= log -G --template '{author}@{rev}.{phase}: {desc}\n'
28 $ hg --config extensions.graphlog= log -G --template '{author}@{rev}.{phase}: {desc}\n'
29 @ test@6.secret: 6
29 @ test@6.secret: 6
30 |\
30 |\
31 | o test@5.draft: 5
31 | o test@5.draft: 5
32 | |
32 | |
33 o | test@4.draft: 4
33 o | test@4.draft: 4
34 |/
34 |/
35 o baz@3.public: 3
35 o baz@3.public: 3
36 |
36 |
37 o test@2.public: 2
37 o test@2.public: 2
38 |
38 |
39 o bar@1.public: 1
39 o bar@1.public: 1
40 |
40 |
41 o test@0.public: 0
41 o test@0.public: 0
42
42
43
43
44 Need to specify a rev:
44 Need to specify a rev:
45
45
46 $ hg graft
46 $ hg graft
47 abort: no revisions specified
47 abort: no revisions specified
48 [255]
48 [255]
49
49
50 Can't graft ancestor:
50 Can't graft ancestor:
51
51
52 $ hg graft 1 2
52 $ hg graft 1 2
53 skipping ancestor revision 1
53 skipping ancestor revision 1
54 skipping ancestor revision 2
54 skipping ancestor revision 2
55 [255]
55 [255]
56
56
57 Can't graft with dirty wd:
57 Can't graft with dirty wd:
58
58
59 $ hg up -q 0
59 $ hg up -q 0
60 $ echo foo > a
60 $ echo foo > a
61 $ hg graft 1
61 $ hg graft 1
62 abort: outstanding uncommitted changes
62 abort: outstanding uncommitted changes
63 [255]
63 [255]
64 $ hg revert a
64 $ hg revert a
65
65
66 Graft a rename:
66 Graft a rename:
67
67
68 $ hg graft 2 -u foo
68 $ hg graft 2 -u foo
69 grafting revision 2
69 grafting revision 2
70 merging a and b to b
70 merging a and b to b
71 $ hg export tip --git
71 $ hg export tip --git
72 # HG changeset patch
72 # HG changeset patch
73 # User foo
73 # User foo
74 # Date 0 0
74 # Date 0 0
75 # Node ID d2e44c99fd3f31c176ea4efb9eca9f6306c81756
75 # Node ID d2e44c99fd3f31c176ea4efb9eca9f6306c81756
76 # Parent 68795b066622ca79a25816a662041d8f78f3cd9e
76 # Parent 68795b066622ca79a25816a662041d8f78f3cd9e
77 2
77 2
78
78
79 diff --git a/a b/b
79 diff --git a/a b/b
80 rename from a
80 rename from a
81 rename to b
81 rename to b
82 --- a/a
82 --- a/a
83 +++ b/b
83 +++ b/b
84 @@ -1,1 +1,1 @@
84 @@ -1,1 +1,1 @@
85 -a
85 -a
86 +b
86 +b
87
87
88 Look for extra:source
88 Look for extra:source
89
89
90 $ hg log --debug -r tip
90 $ hg log --debug -r tip
91 changeset: 7:d2e44c99fd3f31c176ea4efb9eca9f6306c81756
91 changeset: 7:d2e44c99fd3f31c176ea4efb9eca9f6306c81756
92 tag: tip
92 tag: tip
93 phase: draft
93 phase: draft
94 parent: 0:68795b066622ca79a25816a662041d8f78f3cd9e
94 parent: 0:68795b066622ca79a25816a662041d8f78f3cd9e
95 parent: -1:0000000000000000000000000000000000000000
95 parent: -1:0000000000000000000000000000000000000000
96 manifest: 7:5d59766436fd8fbcd38e7bebef0f6eaf3eebe637
96 manifest: 7:5d59766436fd8fbcd38e7bebef0f6eaf3eebe637
97 user: foo
97 user: foo
98 date: Thu Jan 01 00:00:00 1970 +0000
98 date: Thu Jan 01 00:00:00 1970 +0000
99 files+: b
99 files+: b
100 files-: a
100 files-: a
101 extra: branch=default
101 extra: branch=default
102 extra: source=5c095ad7e90f871700f02dd1fa5012cb4498a2d4
102 extra: source=5c095ad7e90f871700f02dd1fa5012cb4498a2d4
103 description:
103 description:
104 2
104 2
105
105
106
106
107
107
108 Graft out of order, skipping a merge and a duplicate
108 Graft out of order, skipping a merge and a duplicate
109
109
110 $ hg graft 1 5 4 3 'merge()' 2 --debug
110 $ hg graft 1 5 4 3 'merge()' 2 --debug
111 skipping ungraftable merge revision 6
111 skipping ungraftable merge revision 6
112 scanning for duplicate grafts
112 scanning for duplicate grafts
113 skipping already grafted revision 2
113 skipping already grafted revision 2
114 grafting revision 1
114 grafting revision 1
115 searching for copies back to rev 1
115 searching for copies back to rev 1
116 unmatched files in local:
116 unmatched files in local:
117 a.orig
118 b
117 b
119 all copies found (* = to merge, ! = divergent):
118 all copies found (* = to merge, ! = divergent):
120 b -> a *
119 b -> a *
121 checking for directory renames
120 checking for directory renames
122 resolving manifests
121 resolving manifests
123 overwrite: False, partial: False
122 overwrite: False, partial: False
124 ancestor: 68795b066622, local: d2e44c99fd3f+, remote: 5d205f8b35b6
123 ancestor: 68795b066622, local: d2e44c99fd3f+, remote: 5d205f8b35b6
125 b: local copied/moved to a -> m
124 b: local copied/moved to a -> m
126 preserving b for resolve of b
125 preserving b for resolve of b
127 updating: b 1/1 files (100.00%)
126 updating: b 1/1 files (100.00%)
128 b
127 b
129 b: searching for copy revision for a
128 b: searching for copy revision for a
130 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
129 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
131 grafting revision 5
130 grafting revision 5
132 searching for copies back to rev 1
131 searching for copies back to rev 1
133 unmatched files in local:
134 a.orig
135 resolving manifests
132 resolving manifests
136 overwrite: False, partial: False
133 overwrite: False, partial: False
137 ancestor: 4c60f11aa304, local: 6f5ea6ac8b70+, remote: 97f8bfe72746
134 ancestor: 4c60f11aa304, local: 6f5ea6ac8b70+, remote: 97f8bfe72746
138 e: remote is newer -> g
135 e: remote is newer -> g
139 updating: e 1/1 files (100.00%)
136 updating: e 1/1 files (100.00%)
140 getting e
137 getting e
141 e
138 e
142 grafting revision 4
139 grafting revision 4
143 searching for copies back to rev 1
140 searching for copies back to rev 1
144 unmatched files in local:
145 a.orig
146 resolving manifests
141 resolving manifests
147 overwrite: False, partial: False
142 overwrite: False, partial: False
148 ancestor: 4c60f11aa304, local: 77eb504366ab+, remote: 9c233e8e184d
143 ancestor: 4c60f11aa304, local: 77eb504366ab+, remote: 9c233e8e184d
149 e: versions differ -> m
144 e: versions differ -> m
150 d: remote is newer -> g
145 d: remote is newer -> g
151 preserving e for resolve of e
146 preserving e for resolve of e
152 updating: d 1/2 files (50.00%)
147 updating: d 1/2 files (50.00%)
153 getting d
148 getting d
154 updating: e 2/2 files (100.00%)
149 updating: e 2/2 files (100.00%)
155 picked tool 'internal:merge' for e (binary False symlink False)
150 picked tool 'internal:merge' for e (binary False symlink False)
156 merging e
151 merging e
157 my e@77eb504366ab+ other e@9c233e8e184d ancestor e@68795b066622
152 my e@77eb504366ab+ other e@9c233e8e184d ancestor e@68795b066622
158 warning: conflicts during merge.
153 warning: conflicts during merge.
159 merging e incomplete! (edit conflicts, then use 'hg resolve --mark')
154 merging e incomplete! (edit conflicts, then use 'hg resolve --mark')
160 abort: unresolved conflicts, can't continue
155 abort: unresolved conflicts, can't continue
161 (use hg resolve and hg graft --continue)
156 (use hg resolve and hg graft --continue)
162 [255]
157 [255]
163
158
164 Continue without resolve should fail:
159 Continue without resolve should fail:
165
160
166 $ hg graft -c
161 $ hg graft -c
167 grafting revision 4
162 grafting revision 4
168 abort: unresolved merge conflicts (see hg help resolve)
163 abort: unresolved merge conflicts (see hg help resolve)
169 [255]
164 [255]
170
165
171 Fix up:
166 Fix up:
172
167
173 $ echo b > e
168 $ echo b > e
174 $ hg resolve -m e
169 $ hg resolve -m e
175
170
176 Continue with a revision should fail:
171 Continue with a revision should fail:
177
172
178 $ hg graft -c 6
173 $ hg graft -c 6
179 abort: can't specify --continue and revisions
174 abort: can't specify --continue and revisions
180 [255]
175 [255]
181
176
182 Continue for real, clobber usernames
177 Continue for real, clobber usernames
183
178
184 $ hg graft -c -U
179 $ hg graft -c -U
185 grafting revision 4
180 grafting revision 4
186 grafting revision 3
181 grafting revision 3
187
182
188 Compare with original:
183 Compare with original:
189
184
190 $ hg diff -r 6
185 $ hg diff -r 6
191 $ hg status --rev 0:. -C
186 $ hg status --rev 0:. -C
192 M d
187 M d
193 M e
188 M e
194 A b
189 A b
195 a
190 a
196 A c
191 A c
197 a
192 a
198 R a
193 R a
199
194
200 View graph:
195 View graph:
201
196
202 $ hg --config extensions.graphlog= log -G --template '{author}@{rev}.{phase}: {desc}\n'
197 $ hg --config extensions.graphlog= log -G --template '{author}@{rev}.{phase}: {desc}\n'
203 @ test@11.draft: 3
198 @ test@11.draft: 3
204 |
199 |
205 o test@10.draft: 4
200 o test@10.draft: 4
206 |
201 |
207 o test@9.draft: 5
202 o test@9.draft: 5
208 |
203 |
209 o bar@8.draft: 1
204 o bar@8.draft: 1
210 |
205 |
211 o foo@7.draft: 2
206 o foo@7.draft: 2
212 |
207 |
213 | o test@6.secret: 6
208 | o test@6.secret: 6
214 | |\
209 | |\
215 | | o test@5.draft: 5
210 | | o test@5.draft: 5
216 | | |
211 | | |
217 | o | test@4.draft: 4
212 | o | test@4.draft: 4
218 | |/
213 | |/
219 | o baz@3.public: 3
214 | o baz@3.public: 3
220 | |
215 | |
221 | o test@2.public: 2
216 | o test@2.public: 2
222 | |
217 | |
223 | o bar@1.public: 1
218 | o bar@1.public: 1
224 |/
219 |/
225 o test@0.public: 0
220 o test@0.public: 0
226
221
227 Graft again onto another branch should preserve the original source
222 Graft again onto another branch should preserve the original source
228 $ hg up -q 0
223 $ hg up -q 0
229 $ echo 'g'>g
224 $ echo 'g'>g
230 $ hg add g
225 $ hg add g
231 $ hg ci -m 7
226 $ hg ci -m 7
232 created new head
227 created new head
233 $ hg graft 7
228 $ hg graft 7
234 grafting revision 7
229 grafting revision 7
235
230
236 $ hg log -r 7 --template '{rev}:{node}\n'
231 $ hg log -r 7 --template '{rev}:{node}\n'
237 7:d2e44c99fd3f31c176ea4efb9eca9f6306c81756
232 7:d2e44c99fd3f31c176ea4efb9eca9f6306c81756
238 $ hg log -r 2 --template '{rev}:{node}\n'
233 $ hg log -r 2 --template '{rev}:{node}\n'
239 2:5c095ad7e90f871700f02dd1fa5012cb4498a2d4
234 2:5c095ad7e90f871700f02dd1fa5012cb4498a2d4
240
235
241 $ hg log --debug -r tip
236 $ hg log --debug -r tip
242 changeset: 13:39bb1d13572759bd1e6fc874fed1b12ece047a18
237 changeset: 13:39bb1d13572759bd1e6fc874fed1b12ece047a18
243 tag: tip
238 tag: tip
244 phase: draft
239 phase: draft
245 parent: 12:b592ea63bb0c19a6c5c44685ee29a2284f9f1b8f
240 parent: 12:b592ea63bb0c19a6c5c44685ee29a2284f9f1b8f
246 parent: -1:0000000000000000000000000000000000000000
241 parent: -1:0000000000000000000000000000000000000000
247 manifest: 13:0780e055d8f4cd12eadd5a2719481648f336f7a9
242 manifest: 13:0780e055d8f4cd12eadd5a2719481648f336f7a9
248 user: foo
243 user: foo
249 date: Thu Jan 01 00:00:00 1970 +0000
244 date: Thu Jan 01 00:00:00 1970 +0000
250 files+: b
245 files+: b
251 files-: a
246 files-: a
252 extra: branch=default
247 extra: branch=default
253 extra: source=5c095ad7e90f871700f02dd1fa5012cb4498a2d4
248 extra: source=5c095ad7e90f871700f02dd1fa5012cb4498a2d4
254 description:
249 description:
255 2
250 2
256
251
257
252
258 Disallow grafting an already grafted cset onto its original branch
253 Disallow grafting an already grafted cset onto its original branch
259 $ hg up -q 6
254 $ hg up -q 6
260 $ hg graft 7
255 $ hg graft 7
261 skipping already grafted revision 7 (was grafted from 2)
256 skipping already grafted revision 7 (was grafted from 2)
262 [255]
257 [255]
263
258
264 Disallow grafting already grafted csets with the same origin onto each other
259 Disallow grafting already grafted csets with the same origin onto each other
265 $ hg up -q 13
260 $ hg up -q 13
266 $ hg graft 2
261 $ hg graft 2
267 skipping already grafted revision 2
262 skipping already grafted revision 2
268 [255]
263 [255]
269 $ hg graft 7
264 $ hg graft 7
270 skipping already grafted revision 7 (same origin 2)
265 skipping already grafted revision 7 (same origin 2)
271 [255]
266 [255]
272
267
273 $ hg up -q 7
268 $ hg up -q 7
274 $ hg graft 2
269 $ hg graft 2
275 skipping already grafted revision 2
270 skipping already grafted revision 2
276 [255]
271 [255]
277 $ hg graft tip
272 $ hg graft tip
278 skipping already grafted revision 13 (same origin 2)
273 skipping already grafted revision 13 (same origin 2)
279 [255]
274 [255]
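The graft tests above rely on the source node recorded in each grafted changeset's extra field (extra: source=...) to skip duplicates, both when re-grafting the original revision and when grafting two copies with the same origin onto each other. A hedged sketch of that duplicate check, simplified and not Mercurial's code:

def already_grafted(candidate_node, candidate_source, destination_extras):
    """Return True if the candidate's origin is already present.
    candidate_source is the node from the candidate's own extra['source']
    (None if it is not itself a graft); destination_extras are the extra
    dicts of changesets already on the destination (hypothetical helper)."""
    seen = {e["source"] for e in destination_extras if "source" in e}
    origin = candidate_source or candidate_node
    return origin in seen

# Grafting revision 7 again is skipped because it shares origin 2:
extras = [{"branch": "default",
           "source": "5c095ad7e90f871700f02dd1fa5012cb4498a2d4"}]
print(already_grafted("d2e44c99fd3f31c176ea4efb9eca9f6306c81756",
                      "5c095ad7e90f871700f02dd1fa5012cb4498a2d4",
                      extras))   # True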
@@ -1,34 +1,34 b''
1 http://mercurial.selenic.com/bts/issue612
1 http://mercurial.selenic.com/bts/issue612
2
2
3 $ hg init
3 $ hg init
4 $ mkdir src
4 $ mkdir src
5 $ echo a > src/a.c
5 $ echo a > src/a.c
6 $ hg ci -Ama
6 $ hg ci -Ama
7 adding src/a.c
7 adding src/a.c
8
8
9 $ hg mv src source
9 $ hg mv src source
10 moving src/a.c to source/a.c (glob)
10 moving src/a.c to source/a.c (glob)
11
11
12 $ hg ci -Ammove
12 $ hg ci -Ammove
13
13
14 $ hg co -C 0
14 $ hg co -C 0
15 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
15 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
16
16
17 $ echo new > src/a.c
17 $ echo new > src/a.c
18 $ echo compiled > src/a.o
18 $ echo compiled > src/a.o
19 $ hg ci -mupdate
19 $ hg ci -mupdate
20 created new head
20 created new head
21
21
22 $ hg status
22 $ hg status
23 ? src/a.o
23 ? src/a.o
24
24
25 $ hg merge
25 $ hg merge
26 merging src/a.c and source/a.c to source/a.c
26 merging src/a.c and source/a.c to source/a.c
27 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
27 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
28 (branch merge, don't forget to commit)
28 (branch merge, don't forget to commit)
29
29
30 $ hg status
30 $ hg status
31 M source/a.c
31 M source/a.c
32 R src/a.c
32 R src/a.c
33 ? source/a.o
33 ? src/a.o
34
34
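With this change, the merge in the issue612 case no longer drags the untracked build product along with the directory rename: src/a.o stays put as '? src/a.o' rather than reappearing as '? source/a.o', and the file-update count drops from 1 to 0. A minimal sketch of the filtering this implies (my reading of the diff, not Mercurial's code): only tracked files take part in the rename remapping.

def files_to_remap(candidates, tracked):
    """Keep only tracked files; unknown files such as build products
    are left where they are (hypothetical helper)."""
    tracked = set(tracked)
    return [f for f in candidates if f in tracked]

print(files_to_remap(["src/a.c", "src/a.o"], ["src/a.c"]))   # ['src/a.c']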
@@ -1,163 +1,157 b''
1 $ hg init t
1 $ hg init t
2 $ cd t
2 $ cd t
3
3
4 $ mkdir a
4 $ mkdir a
5 $ echo foo > a/a
5 $ echo foo > a/a
6 $ echo bar > a/b
6 $ echo bar > a/b
7 $ hg ci -Am "0"
7 $ hg ci -Am "0"
8 adding a/a
8 adding a/a
9 adding a/b
9 adding a/b
10
10
11 $ hg co -C 0
11 $ hg co -C 0
12 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
12 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
13 $ hg mv a b
13 $ hg mv a b
14 moving a/a to b/a (glob)
14 moving a/a to b/a (glob)
15 moving a/b to b/b (glob)
15 moving a/b to b/b (glob)
16 $ hg ci -m "1 mv a/ b/"
16 $ hg ci -m "1 mv a/ b/"
17
17
18 $ hg co -C 0
18 $ hg co -C 0
19 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
19 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
20 $ echo baz > a/c
20 $ echo baz > a/c
21 $ echo quux > a/d
21 $ echo quux > a/d
22 $ hg add a/c
22 $ hg add a/c
23 $ hg ci -m "2 add a/c"
23 $ hg ci -m "2 add a/c"
24 created new head
24 created new head
25
25
26 $ hg merge --debug 1
26 $ hg merge --debug 1
27 searching for copies back to rev 1
27 searching for copies back to rev 1
28 unmatched files in local:
28 unmatched files in local:
29 a/c
29 a/c
30 a/d
31 unmatched files in other:
30 unmatched files in other:
32 b/a
31 b/a
33 b/b
32 b/b
34 all copies found (* = to merge, ! = divergent):
33 all copies found (* = to merge, ! = divergent):
35 b/a -> a/a
34 b/a -> a/a
36 b/b -> a/b
35 b/b -> a/b
37 checking for directory renames
36 checking for directory renames
38 dir a/ -> b/
37 dir a/ -> b/
39 file a/c -> b/c
38 file a/c -> b/c
40 file a/d -> b/d
41 resolving manifests
39 resolving manifests
42 overwrite: False, partial: False
40 overwrite: False, partial: False
43 ancestor: f9b20c0d4c51, local: ce36d17b18fb+, remote: 397f8b00a740
41 ancestor: f9b20c0d4c51, local: ce36d17b18fb+, remote: 397f8b00a740
44 a/d: remote renamed directory to b/d -> d
45 a/c: remote renamed directory to b/c -> d
42 a/c: remote renamed directory to b/c -> d
46 a/b: other deleted -> r
43 a/b: other deleted -> r
47 a/a: other deleted -> r
44 a/a: other deleted -> r
48 b/a: remote created -> g
45 b/a: remote created -> g
49 b/b: remote created -> g
46 b/b: remote created -> g
50 updating: a/a 1/6 files (16.67%)
47 updating: a/a 1/5 files (20.00%)
51 removing a/a
48 removing a/a
52 updating: a/b 2/6 files (33.33%)
49 updating: a/b 2/5 files (40.00%)
53 removing a/b
50 removing a/b
54 updating: a/c 3/6 files (50.00%)
51 updating: a/c 3/5 files (60.00%)
55 moving a/c to b/c
52 moving a/c to b/c
56 updating: a/d 4/6 files (66.67%)
53 updating: b/a 4/5 files (80.00%)
57 moving a/d to b/d
58 updating: b/a 5/6 files (83.33%)
59 getting b/a
54 getting b/a
60 updating: b/b 6/6 files (100.00%)
55 updating: b/b 5/5 files (100.00%)
61 getting b/b
56 getting b/b
62 4 files updated, 0 files merged, 2 files removed, 0 files unresolved
57 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
63 (branch merge, don't forget to commit)
58 (branch merge, don't forget to commit)
64
59
65 $ echo a/* b/*
60 $ echo a/* b/*
66 a/* b/a b/b b/c b/d
61 a/d b/a b/b b/c
67 $ hg st -C
62 $ hg st -C
68 M b/a
63 M b/a
69 M b/b
64 M b/b
70 A b/c
65 A b/c
71 a/c
66 a/c
72 R a/a
67 R a/a
73 R a/b
68 R a/b
74 R a/c
69 R a/c
75 ? b/d
70 ? a/d
76 $ hg ci -m "3 merge 2+1"
71 $ hg ci -m "3 merge 2+1"
77 $ hg debugrename b/c
72 $ hg debugrename b/c
78 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88 (glob)
73 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88 (glob)
79
74
80 $ hg co -C 1
75 $ hg co -C 1
81 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
76 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
82 $ hg merge --debug 2
77 $ hg merge --debug 2
83 searching for copies back to rev 1
78 searching for copies back to rev 1
84 unmatched files in local:
79 unmatched files in local:
85 b/a
80 b/a
86 b/b
81 b/b
87 b/d
88 unmatched files in other:
82 unmatched files in other:
89 a/c
83 a/c
90 all copies found (* = to merge, ! = divergent):
84 all copies found (* = to merge, ! = divergent):
91 b/a -> a/a
85 b/a -> a/a
92 b/b -> a/b
86 b/b -> a/b
93 checking for directory renames
87 checking for directory renames
94 dir a/ -> b/
88 dir a/ -> b/
95 file a/c -> b/c
89 file a/c -> b/c
96 resolving manifests
90 resolving manifests
97 overwrite: False, partial: False
91 overwrite: False, partial: False
98 ancestor: f9b20c0d4c51, local: 397f8b00a740+, remote: ce36d17b18fb
92 ancestor: f9b20c0d4c51, local: 397f8b00a740+, remote: ce36d17b18fb
99 None: local renamed directory to b/c -> d
93 None: local renamed directory to b/c -> d
100 updating:None 1/1 files (100.00%)
94 updating:None 1/1 files (100.00%)
101 getting a/c to b/c
95 getting a/c to b/c
102 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
96 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
103 (branch merge, don't forget to commit)
97 (branch merge, don't forget to commit)
104
98
105 $ echo a/* b/*
99 $ echo a/* b/*
106 a/* b/a b/b b/c b/d
100 a/d b/a b/b b/c
107 $ hg st -C
101 $ hg st -C
108 A b/c
102 A b/c
109 a/c
103 a/c
110 ? b/d
104 ? a/d
111 $ hg ci -m "4 merge 1+2"
105 $ hg ci -m "4 merge 1+2"
112 created new head
106 created new head
113 $ hg debugrename b/c
107 $ hg debugrename b/c
114 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88 (glob)
108 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88 (glob)
115
109
116
110
117 Second scenario with two repos:
111 Second scenario with two repos:
118
112
119 $ cd ..
113 $ cd ..
120 $ hg init r1
114 $ hg init r1
121 $ cd r1
115 $ cd r1
122 $ mkdir a
116 $ mkdir a
123 $ echo foo > a/f
117 $ echo foo > a/f
124 $ hg add a
118 $ hg add a
125 adding a/f (glob)
119 adding a/f (glob)
126 $ hg ci -m "a/f == foo"
120 $ hg ci -m "a/f == foo"
127 $ cd ..
121 $ cd ..
128
122
129 $ hg clone r1 r2
123 $ hg clone r1 r2
130 updating to branch default
124 updating to branch default
131 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
125 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
132 $ cd r2
126 $ cd r2
133 $ hg mv a b
127 $ hg mv a b
134 moving a/f to b/f (glob)
128 moving a/f to b/f (glob)
135 $ echo foo1 > b/f
129 $ echo foo1 > b/f
136 $ hg ci -m" a -> b, b/f == foo1"
130 $ hg ci -m" a -> b, b/f == foo1"
137 $ cd ..
131 $ cd ..
138
132
139 $ cd r1
133 $ cd r1
140 $ mkdir a/aa
134 $ mkdir a/aa
141 $ echo bar > a/aa/g
135 $ echo bar > a/aa/g
142 $ hg add a/aa
136 $ hg add a/aa
143 adding a/aa/g (glob)
137 adding a/aa/g (glob)
144 $ hg ci -m "a/aa/g"
138 $ hg ci -m "a/aa/g"
145 $ hg pull ../r2
139 $ hg pull ../r2
146 pulling from ../r2
140 pulling from ../r2
147 searching for changes
141 searching for changes
148 adding changesets
142 adding changesets
149 adding manifests
143 adding manifests
150 adding file changes
144 adding file changes
151 added 1 changesets with 1 changes to 1 files (+1 heads)
145 added 1 changesets with 1 changes to 1 files (+1 heads)
152 (run 'hg heads' to see heads, 'hg merge' to merge)
146 (run 'hg heads' to see heads, 'hg merge' to merge)
153
147
154 $ hg merge
148 $ hg merge
155 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
149 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
156 (branch merge, don't forget to commit)
150 (branch merge, don't forget to commit)
157
151
158 $ hg st -C
152 $ hg st -C
159 M b/f
153 M b/f
160 A b/aa/g
154 A b/aa/g
161 a/aa/g
155 a/aa/g
162 R a/aa/g
156 R a/aa/g
163 R a/f
157 R a/f
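The directory-rename runs above ('dir a/ -> b/', 'file a/c -> b/c') show locally added tracked files being relocated into the renamed directory, while the unadded a/d is now left alone as an unknown file. The remapping itself, assuming a single detected rename from src to dst, can be sketched as follows (an illustration, not Mercurial's implementation):

def remap_into_renamed_dir(paths, src, dst):
    """Map each path under src/ to the corresponding path under dst/."""
    prefix = src + "/"
    return {p: dst + "/" + p[len(prefix):]
            for p in paths if p.startswith(prefix)}

# a/c was added locally, so the merge moves it to b/c; untracked a/d is not touched.
print(remap_into_renamed_dir(["a/c"], "a", "b"))   # {'a/c': 'b/c'}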