@@ -1,1165 +1,1166 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 '''Overridden Mercurial commands and functions for the largefiles extension'''
 
 import os
 import copy
 
 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
     node, archival, error, merge, discovery
 from mercurial.i18n import _
 from mercurial.node import hex
 from hgext import rebase
 
 import lfutil
 import lfcommands
 
 # -- Utility functions: commonly/repeatedly needed functionality ---------------
 
 def installnormalfilesmatchfn(manifest):
     '''overrides scmutil.match so that the matcher it returns will ignore all
     largefiles'''
     oldmatch = None # for the closure
     def overridematch(ctx, pats=[], opts={}, globbed=False,
             default='relpath'):
         match = oldmatch(ctx, pats, opts, globbed, default)
         m = copy.copy(match)
         notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
             manifest)
         m._files = filter(notlfile, m._files)
         m._fmap = set(m._files)
         origmatchfn = m.matchfn
         m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
         return m
     oldmatch = installmatchfn(overridematch)
 
 def installmatchfn(f):
     oldmatch = scmutil.match
     setattr(f, 'oldmatch', oldmatch)
     scmutil.match = f
     return oldmatch
 
 def restorematchfn():
     '''restores scmutil.match to what it was before installnormalfilesmatchfn
     was called. no-op if scmutil.match is its original function.
 
     Note that n calls to installnormalfilesmatchfn will require n calls to
     restore matchfn to reverse'''
     scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
 
 def addlargefiles(ui, repo, *pats, **opts):
     large = opts.pop('large', None)
     lfsize = lfutil.getminsize(
         ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
 
     lfmatcher = None
     if lfutil.islfilesrepo(repo):
         lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
         if lfpats:
             lfmatcher = match_.match(repo.root, '', list(lfpats))
 
     lfnames = []
     m = scmutil.match(repo[None], pats, opts)
     m.bad = lambda x, y: None
     wctx = repo[None]
     for f in repo.walk(m):
         exact = m.exact(f)
         lfile = lfutil.standin(f) in wctx
         nfile = f in wctx
         exists = lfile or nfile
 
         # Don't warn the user when they attempt to add a normal tracked file.
         # The normal add code will do that for us.
         if exact and exists:
             if lfile:
                 ui.warn(_('%s already a largefile\n') % f)
             continue
 
         if (exact or not exists) and not lfutil.isstandin(f):
             wfile = repo.wjoin(f)
 
             # In case the file was removed previously, but not committed
             # (issue3507)
             if not os.path.exists(wfile):
                 continue
 
             abovemin = (lfsize and
                         os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
             if large or abovemin or (lfmatcher and lfmatcher(f)):
                 lfnames.append(f)
                 if ui.verbose or not exact:
                     ui.status(_('adding %s as a largefile\n') % m.rel(f))
 
     bad = []
     standins = []
 
     # Need to lock, otherwise there could be a race condition between
     # when standins are created and added to the repo.
     wlock = repo.wlock()
     try:
         if not opts.get('dry_run'):
             lfdirstate = lfutil.openlfdirstate(ui, repo)
             for f in lfnames:
                 standinname = lfutil.standin(f)
                 lfutil.writestandin(repo, standinname, hash='',
                     executable=lfutil.getexecutable(repo.wjoin(f)))
                 standins.append(standinname)
                 if lfdirstate[f] == 'r':
                     lfdirstate.normallookup(f)
                 else:
                     lfdirstate.add(f)
             lfdirstate.write()
             bad += [lfutil.splitstandin(f)
                     for f in repo[None].add(standins)
                     if f in m.files()]
     finally:
         wlock.release()
     return bad
 
 def removelargefiles(ui, repo, *pats, **opts):
     after = opts.get('after')
     if not pats and not after:
         raise util.Abort(_('no files specified'))
     m = scmutil.match(repo[None], pats, opts)
     try:
         repo.lfstatus = True
         s = repo.status(match=m, clean=True)
     finally:
         repo.lfstatus = False
     manifest = repo[None].manifest()
     modified, added, deleted, clean = [[f for f in list
                                         if lfutil.standin(f) in manifest]
                                        for list in [s[0], s[1], s[3], s[6]]]
 
     def warn(files, msg):
         for f in files:
             ui.warn(msg % m.rel(f))
         return int(len(files) > 0)
 
     result = 0
 
     if after:
         remove, forget = deleted, []
         result = warn(modified + added + clean,
                       _('not removing %s: file still exists\n'))
     else:
         remove, forget = deleted + clean, []
         result = warn(modified, _('not removing %s: file is modified (use -f'
                                   ' to force removal)\n'))
         result = warn(added, _('not removing %s: file has been marked for add'
                                ' (use forget to undo)\n')) or result
 
     for f in sorted(remove + forget):
         if ui.verbose or not m.exact(f):
             ui.status(_('removing %s\n') % m.rel(f))
 
     # Need to lock because standin files are deleted then removed from the
     # repository and we could race in-between.
     wlock = repo.wlock()
     try:
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         for f in remove:
             if not after:
                 # If this is being called by addremove, notify the user that we
                 # are removing the file.
                 if getattr(repo, "_isaddremove", False):
                     ui.status(_('removing %s\n') % f)
                 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
             lfdirstate.remove(f)
         lfdirstate.write()
         forget = [lfutil.standin(f) for f in forget]
         remove = [lfutil.standin(f) for f in remove]
         repo[None].forget(forget)
         # If this is being called by addremove, let the original addremove
         # function handle this.
         if not getattr(repo, "_isaddremove", False):
             for f in remove:
                 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
         repo[None].forget(remove)
     finally:
         wlock.release()
 
     return result
 
 # For overriding mercurial.hgweb.webcommands so that largefiles will
 # appear at their right place in the manifests.
 def decodepath(orig, path):
     return lfutil.splitstandin(path) or path
 
 # -- Wrappers: modify existing commands --------------------------------
 
 # Add works by going through the files that the user wanted to add and
 # checking if they should be added as largefiles. Then it makes a new
 # matcher which matches only the normal files and runs the original
 # version of add.
 def overrideadd(orig, ui, repo, *pats, **opts):
     normal = opts.pop('normal')
     if normal:
         if opts.get('large'):
             raise util.Abort(_('--normal cannot be used with --large'))
         return orig(ui, repo, *pats, **opts)
     bad = addlargefiles(ui, repo, *pats, **opts)
     installnormalfilesmatchfn(repo[None].manifest())
     result = orig(ui, repo, *pats, **opts)
     restorematchfn()
 
     return (result == 1 or bad) and 1 or 0
 
 def overrideremove(orig, ui, repo, *pats, **opts):
     installnormalfilesmatchfn(repo[None].manifest())
     result = orig(ui, repo, *pats, **opts)
     restorematchfn()
     return removelargefiles(ui, repo, *pats, **opts) or result
 
 def overridestatusfn(orig, repo, rev2, **opts):
     try:
         repo._repo.lfstatus = True
         return orig(repo, rev2, **opts)
     finally:
         repo._repo.lfstatus = False
 
 def overridestatus(orig, ui, repo, *pats, **opts):
     try:
         repo.lfstatus = True
         return orig(ui, repo, *pats, **opts)
     finally:
         repo.lfstatus = False
 
 def overridedirty(orig, repo, ignoreupdate=False):
     try:
         repo._repo.lfstatus = True
         return orig(repo, ignoreupdate)
     finally:
         repo._repo.lfstatus = False
 
 def overridelog(orig, ui, repo, *pats, **opts):
     def overridematch(ctx, pats=[], opts={}, globbed=False,
             default='relpath'):
         """Matcher that merges root directory with .hglf, suitable for log.
         It is still possible to match .hglf directly.
         For any listed files run log on the standin too.
         matchfn tries both the given filename and with .hglf stripped.
         """
         match = oldmatch(ctx, pats, opts, globbed, default)
         m = copy.copy(match)
         standins = [lfutil.standin(f) for f in m._files]
         m._files.extend(standins)
         m._fmap = set(m._files)
         origmatchfn = m.matchfn
         def lfmatchfn(f):
             lf = lfutil.splitstandin(f)
             if lf is not None and origmatchfn(lf):
                 return True
             r = origmatchfn(f)
             return r
         m.matchfn = lfmatchfn
         return m
     oldmatch = installmatchfn(overridematch)
     try:
         repo.lfstatus = True
         return orig(ui, repo, *pats, **opts)
     finally:
         repo.lfstatus = False
         restorematchfn()
 
 def overrideverify(orig, ui, repo, *pats, **opts):
     large = opts.pop('large', False)
     all = opts.pop('lfa', False)
     contents = opts.pop('lfc', False)
 
     result = orig(ui, repo, *pats, **opts)
     if large or all or contents:
         result = result or lfcommands.verifylfiles(ui, repo, all, contents)
     return result
 
 def overridedebugstate(orig, ui, repo, *pats, **opts):
     large = opts.pop('large', False)
     if large:
         lfcommands.debugdirstate(ui, repo)
     else:
         orig(ui, repo, *pats, **opts)
 
 # Override needs to refresh standins so that update's normal merge
 # will go through properly. Then the other update hook (overriding repo.update)
 # will get the new files. Filemerge is also overridden so that the merge
 # will merge standins correctly.
 def overrideupdate(orig, ui, repo, *pats, **opts):
     lfdirstate = lfutil.openlfdirstate(ui, repo)
     s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
         False, False)
     (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
 
     # Need to lock between the standins getting updated and their
     # largefiles getting updated
     wlock = repo.wlock()
     try:
         if opts['check']:
             mod = len(modified) > 0
             for lfile in unsure:
                 standin = lfutil.standin(lfile)
                 if repo['.'][standin].data().strip() != \
                         lfutil.hashfile(repo.wjoin(lfile)):
                     mod = True
                 else:
                     lfdirstate.normal(lfile)
             lfdirstate.write()
             if mod:
                 raise util.Abort(_('uncommitted local changes'))
         # XXX handle removed differently
         if not opts['clean']:
             for lfile in unsure + modified + added:
                 lfutil.updatestandin(repo, lfutil.standin(lfile))
     finally:
         wlock.release()
     return orig(ui, repo, *pats, **opts)
 
 # Before starting the manifest merge, merge.updates will call
 # _checkunknown to check if there are any files in the merged-in
 # changeset that collide with unknown files in the working copy.
 #
 # The largefiles are seen as unknown, so this prevents us from merging
 # in a file 'foo' if we already have a largefile with the same name.
 #
 # The overridden function filters the unknown files by removing any
 # largefiles. This makes the merge proceed and we can then handle this
 # case further in the overridden manifestmerge function below.
 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
     if lfutil.standin(f) in wctx:
         return False
     return origfn(repo, wctx, mctx, f)
 
 # The manifest merge handles conflicts on the manifest level. We want
 # to handle changes in largefile-ness of files at this level too.
 #
 # The strategy is to run the original manifestmerge and then process
 # the action list it outputs. There are two cases we need to deal with:
 #
 # 1. Normal file in p1, largefile in p2. Here the largefile is
 #    detected via its standin file, which will enter the working copy
 #    with a "get" action. It is not "merge" since the standin is all
 #    Mercurial is concerned with at this level -- the link to the
 #    existing normal file is not relevant here.
 #
 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
 #    since the largefile will be present in the working copy and
 #    different from the normal file in p2. Mercurial therefore
 #    triggers a merge action.
 #
 # In both cases, we prompt the user and emit new actions to either
 # remove the standin (if the normal file was kept) or to remove the
 # normal file and get the standin (if the largefile was kept). The
 # default prompt answer is to use the largefile version since it was
 # presumably changed on purpose.
 #
 # Finally, the merge.applyupdates function will then take care of
 # writing the files into the working copy and lfcommands.updatelfiles
 # will update the largefiles.
 def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
-                          partial):
+                          partial, acceptremote=False):
     overwrite = force and not branchmerge
-    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial)
+    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
+                     acceptremote)
     processed = []
 
     for action in actions:
         if overwrite:
             processed.append(action)
             continue
         f, m, args, msg = action
 
         choices = (_('&Largefile'), _('&Normal file'))
         if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
             # Case 1: normal file in the working copy, largefile in
             # the second parent
             lfile = lfutil.splitstandin(f)
             standin = f
             msg = _('%s has been turned into a largefile\n'
                     'use (l)argefile or keep as (n)ormal file?') % lfile
             if repo.ui.promptchoice(msg, choices, 0) == 0:
                 processed.append((lfile, "r", None, msg))
                 processed.append((standin, "g", (p2.flags(standin),), msg))
             else:
                 processed.append((standin, "r", None, msg))
         elif m == "g" and lfutil.standin(f) in p1 and f in p2:
             # Case 2: largefile in the working copy, normal file in
             # the second parent
             standin = lfutil.standin(f)
             lfile = f
             msg = _('%s has been turned into a normal file\n'
                     'keep as (l)argefile or use (n)ormal file?') % lfile
             if repo.ui.promptchoice(msg, choices, 0) == 0:
                 processed.append((lfile, "r", None, msg))
             else:
                 processed.append((standin, "r", None, msg))
                 processed.append((lfile, "g", (p2.flags(lfile),), msg))
         else:
             processed.append(action)
 
     return processed
 
 # Override filemerge to prompt the user about how they wish to merge
 # largefiles. This will handle identical edits, and copy/rename +
 # edit without prompting the user.
 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
     # Use better variable names here. Because this is a wrapper we cannot
     # change the variable names in the function declaration.
     fcdest, fcother, fcancestor = fcd, fco, fca
     if not lfutil.isstandin(orig):
         return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
     else:
         if not fcother.cmp(fcdest): # files identical?
             return None
 
         # backwards, use working dir parent as ancestor
         if fcancestor == fcother:
             fcancestor = fcdest.parents()[0]
 
         if orig != fcother.path():
             repo.ui.status(_('merging %s and %s to %s\n')
                            % (lfutil.splitstandin(orig),
                               lfutil.splitstandin(fcother.path()),
                               lfutil.splitstandin(fcdest.path())))
         else:
             repo.ui.status(_('merging %s\n')
                            % lfutil.splitstandin(fcdest.path()))
 
         if fcancestor.path() != fcother.path() and fcother.data() == \
                 fcancestor.data():
             return 0
         if fcancestor.path() != fcdest.path() and fcdest.data() == \
                 fcancestor.data():
             repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
             return 0
 
         if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
                                 'keep (l)ocal or take (o)ther?') %
                                 lfutil.splitstandin(orig),
                                 (_('&Local'), _('&Other')), 0) == 0:
             return 0
         else:
             repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
             return 0
 
 # Copy first changes the matchers to match standins instead of
 # largefiles. Then it overrides util.copyfile in that function it
 # checks if the destination largefile already exists. It also keeps a
 # list of copied files so that the largefiles can be copied and the
 # dirstate updated.
 def overridecopy(orig, ui, repo, pats, opts, rename=False):
     # doesn't remove largefile on rename
     if len(pats) < 2:
         # this isn't legal, let the original function deal with it
         return orig(ui, repo, pats, opts, rename)
 
     def makestandin(relpath):
         path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
         return os.path.join(repo.wjoin(lfutil.standin(path)))
 
     fullpats = scmutil.expandpats(pats)
     dest = fullpats[-1]
 
     if os.path.isdir(dest):
         if not os.path.isdir(makestandin(dest)):
             os.makedirs(makestandin(dest))
     # This could copy both lfiles and normal files in one command,
     # but we don't want to do that. First replace their matcher to
     # only match normal files and run it, then replace it to just
     # match largefiles and run it again.
     nonormalfiles = False
     nolfiles = False
     try:
         try:
             installnormalfilesmatchfn(repo[None].manifest())
             result = orig(ui, repo, pats, opts, rename)
         except util.Abort, e:
             if str(e) != _('no files to copy'):
                 raise e
             else:
                 nonormalfiles = True
             result = 0
     finally:
         restorematchfn()
 
     # The first rename can cause our current working directory to be removed.
     # In that case there is nothing left to copy/rename so just quit.
     try:
         repo.getcwd()
     except OSError:
         return result
 
     try:
         try:
             # When we call orig below it creates the standins but we don't add
             # them to the dir state until later so lock during that time.
             wlock = repo.wlock()
 
             manifest = repo[None].manifest()
             oldmatch = None # for the closure
             def overridematch(ctx, pats=[], opts={}, globbed=False,
                     default='relpath'):
                 newpats = []
                 # The patterns were previously mangled to add the standin
                 # directory; we need to remove that now
                 for pat in pats:
                     if match_.patkind(pat) is None and lfutil.shortname in pat:
                         newpats.append(pat.replace(lfutil.shortname, ''))
                     else:
                         newpats.append(pat)
                 match = oldmatch(ctx, newpats, opts, globbed, default)
                 m = copy.copy(match)
                 lfile = lambda f: lfutil.standin(f) in manifest
                 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                 m._fmap = set(m._files)
                 origmatchfn = m.matchfn
                 m.matchfn = lambda f: (lfutil.isstandin(f) and
                                     (f in manifest) and
                                     origmatchfn(lfutil.splitstandin(f)) or
                                     None)
                 return m
             oldmatch = installmatchfn(overridematch)
             listpats = []
             for pat in pats:
                 if match_.patkind(pat) is not None:
                     listpats.append(pat)
                 else:
                     listpats.append(makestandin(pat))
 
             try:
                 origcopyfile = util.copyfile
                 copiedfiles = []
                 def overridecopyfile(src, dest):
                     if (lfutil.shortname in src and
                         dest.startswith(repo.wjoin(lfutil.shortname))):
                         destlfile = dest.replace(lfutil.shortname, '')
                         if not opts['force'] and os.path.exists(destlfile):
                             raise IOError('',
                                 _('destination largefile already exists'))
                     copiedfiles.append((src, dest))
                     origcopyfile(src, dest)
 
                 util.copyfile = overridecopyfile
                 result += orig(ui, repo, listpats, opts, rename)
             finally:
                 util.copyfile = origcopyfile
 
             lfdirstate = lfutil.openlfdirstate(ui, repo)
             for (src, dest) in copiedfiles:
                 if (lfutil.shortname in src and
                     dest.startswith(repo.wjoin(lfutil.shortname))):
                     srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                     destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                     destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                     if not os.path.isdir(destlfiledir):
                         os.makedirs(destlfiledir)
                     if rename:
                         os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
                         lfdirstate.remove(srclfile)
                     else:
                         util.copyfile(repo.wjoin(srclfile),
                                       repo.wjoin(destlfile))
 
                     lfdirstate.add(destlfile)
             lfdirstate.write()
         except util.Abort, e:
             if str(e) != _('no files to copy'):
                 raise e
             else:
                 nolfiles = True
     finally:
         restorematchfn()
         wlock.release()
 
     if nolfiles and nonormalfiles:
         raise util.Abort(_('no files to copy'))
 
     return result
 
 # When the user calls revert, we have to be careful to not revert any
 # changes to other largefiles accidentally. This means we have to keep
 # track of the largefiles that are being reverted so we only pull down
 # the necessary largefiles.
 #
 # Standins are only updated (to match the hash of largefiles) before
 # commits. Update the standins then run the original revert, changing
 # the matcher to hit standins instead of largefiles. Based on the
 # resulting standins update the largefiles. Then return the standins
 # to their proper state
 def overriderevert(orig, ui, repo, *pats, **opts):
     # Because we put the standins in a bad state (by updating them)
     # and then return them to a correct state we need to lock to
     # prevent others from changing them in their incorrect state.
     wlock = repo.wlock()
     try:
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         (modified, added, removed, missing, unknown, ignored, clean) = \
             lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
         lfdirstate.write()
         for lfile in modified:
             lfutil.updatestandin(repo, lfutil.standin(lfile))
         for lfile in missing:
             if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                 os.unlink(repo.wjoin(lfutil.standin(lfile)))
 
         try:
             ctx = scmutil.revsingle(repo, opts.get('rev'))
             oldmatch = None # for the closure
             def overridematch(ctx, pats=[], opts={}, globbed=False,
                     default='relpath'):
                 match = oldmatch(ctx, pats, opts, globbed, default)
                 m = copy.copy(match)
                 def tostandin(f):
                     if lfutil.standin(f) in ctx:
                         return lfutil.standin(f)
                     elif lfutil.standin(f) in repo[None]:
                         return None
                     return f
                 m._files = [tostandin(f) for f in m._files]
                 m._files = [f for f in m._files if f is not None]
                 m._fmap = set(m._files)
                 origmatchfn = m.matchfn
                 def matchfn(f):
                     if lfutil.isstandin(f):
                         # We need to keep track of what largefiles are being
                         # matched so we know which ones to update later --
                         # otherwise we accidentally revert changes to other
                         # largefiles. This is repo-specific, so duckpunch the
                         # repo object to keep the list of largefiles for us
                         # later.
                         if origmatchfn(lfutil.splitstandin(f)) and \
                                 (f in repo[None] or f in ctx):
                             lfileslist = getattr(repo, '_lfilestoupdate', [])
                             lfileslist.append(lfutil.splitstandin(f))
                             repo._lfilestoupdate = lfileslist
                             return True
                         else:
                             return False
                     return origmatchfn(f)
                 m.matchfn = matchfn
                 return m
             oldmatch = installmatchfn(overridematch)
             scmutil.match
             matches = overridematch(repo[None], pats, opts)
             orig(ui, repo, *pats, **opts)
         finally:
             restorematchfn()
         lfileslist = getattr(repo, '_lfilestoupdate', [])
         lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                 printmessage=False)
 
         # empty out the largefiles list so we start fresh next time
         repo._lfilestoupdate = []
         for lfile in modified:
             if lfile in lfileslist:
                 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                         in repo['.']:
                     lfutil.writestandin(repo, lfutil.standin(lfile),
                         repo['.'][lfile].data().strip(),
                         'x' in repo['.'][lfile].flags())
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         for lfile in added:
             standin = lfutil.standin(lfile)
             if standin not in ctx and (standin in matches or opts.get('all')):
                 if lfile in lfdirstate:
                     lfdirstate.drop(lfile)
                 util.unlinkpath(repo.wjoin(standin))
         lfdirstate.write()
     finally:
         wlock.release()
 
def hgupdaterepo(orig, repo, node, overwrite):
    if not overwrite:
        # Only call updatelfiles on the standins that have changed to save time
        oldstandins = lfutil.getstandinsstate(repo)

    result = orig(repo, node, overwrite)

    filelist = None
    if not overwrite:
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
    lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
    return result

def hgmerge(orig, repo, node, force=None, remind=True):
    result = orig(repo, node, force, remind)
    lfcommands.updatelfiles(repo.ui, repo)
    return result

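# Illustrative sketch (not part of the original module): overrides such as
# hgupdaterepo and hgmerge above take the original function as their first
# argument, which is the calling convention used by Mercurial's
# extensions.wrapfunction helper.  The real wiring for this extension lives in
# its setup code; the snippet below only shows the expected shape of such a
# registration (the wrapped attribute names are assumptions).
def _examplewrapfunctions():
    from mercurial import extensions
    extensions.wrapfunction(hg, 'merge', hgmerge)
    extensions.wrapfunction(hg, 'updaterepo', hgupdaterepo)
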
# When we rebase a repository with remotely changed largefiles, we need to
# take some extra care so that the largefiles are correctly updated in the
# working copy
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            if not source:
                source = 'default'
            repo.lfpullsource = source
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        if not source:
            source = 'default'
        repo.lfpullsource = source
        oldheads = lfutil.getcurrentheads(repo)
        result = orig(ui, repo, source, **opts)
        if opts.get('cache_largefiles'):
            # If you are pulling from a remote location that is not your
            # default location, you may want to cache largefiles for new heads
            # that have been pulled, so you can easily merge or rebase with
            # them later
            numcached = 0
            heads = lfutil.getcurrentheads(repo)
            newheads = set(heads).difference(set(oldheads))
            if len(newheads) > 0:
                ui.status(_("caching largefiles for %s heads\n") %
                          len(newheads))
            for head in newheads:
                (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
                numcached += len(cached)
            ui.status(_("%d largefiles cached\n") % numcached)
    if opts.get('all_largefiles'):
        revspostpull = len(repo)
        revs = []
        for rev in xrange(revsprepull, revspostpull):
            revs.append(repo[rev].rev())
        lfcommands.downloadlfiles(ui, repo, revs)
    return result

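# Added commentary (not part of the original module): the option names checked
# above correspond to command-line flags spelled with dashes, i.e. roughly
# 'hg pull --rebase', 'hg pull --cache-largefiles' and
# 'hg pull --all-largefiles'.  The "which revisions did this pull add?"
# bookkeeping is just a length comparison on the changelog; a standalone
# sketch of that idiom:
def _pulledrevs(repo, revsprepull):
    # every revision numbered at or after the pre-pull length is new
    return list(xrange(revsprepull, len(repo)))
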
def overrideclone(orig, ui, source, dest=None, **opts):
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s' %
            d))

    return orig(ui, source, dest, **opts)

def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point.  The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            if missing != 0:
                return None

    return result

def overriderebase(orig, ui, repo, **opts):
    repo._isrebasing = True
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False

def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix=None, mtime=None, subrepos=None):
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(repo.ui, archiver, prefix, submatch)

    archiver.done()

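# Illustrative sketch (not part of the original module): the metadata() helper
# above produces the '.hg_archival.txt' payload.  With the keys taken from that
# code, an archive of an untagged revision would contain something like the
# following (the hashes, tag name and distance here are made up):
_EXAMPLE_HG_ARCHIVAL_TXT = (
    'repo: 0123456789abcdef0123456789abcdef01234567\n'
    'node: 89abcdef0123456789abcdef0123456789abcdef\n'
    'branch: default\n'
    'latesttag: 1.9\n'
    'latesttagdistance: 12\n'
)
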
def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    lfcommands.cachelfiles(ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo._repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
                    submatch)

# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def overridebailifchanged(orig, repo):
    orig(repo)
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))

# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
def overridefetch(orig, ui, repo, *pats, **opts):
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)

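# Illustrative sketch (not part of the original module): the two overrides
# above, like several others in this file, share the idiom "set repo.lfstatus,
# call repo.status(), reset repo.lfstatus" so that status reports largefiles
# under their own names rather than their standins.  A hypothetical helper for
# that idiom (the helper name is made up):
def _statuswithlfiles(repo, **kwargs):
    repo.lfstatus = True
    try:
        return repo.status(**kwargs)
    finally:
        repo.lfstatus = False
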
def overrideforget(orig, ui, repo, *pats, **opts):
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            result = 1

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(standins)
    finally:
        wlock.release()

    return result

def getoutgoinglfiles(ui, repo, dest=None, **opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    try:
        remote = hg.peer(repo, opts, dest)
    except error.RepoError:
        return None
    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
    if not outgoing.missing:
        return outgoing.missing
    o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return sorted(toupload)

def overrideoutgoing(orig, ui, repo, dest=None, **opts):
    result = orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        elif not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for file in toupload:
                ui.status(lfutil.splitstandin(file) + '\n')
            ui.status('\n')

    return result

def overridesummary(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, None, **opts)
        if toupload is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
        elif not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d to upload\n') % len(toupload))

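# Illustrative sketch (not part of the original module): overrideoutgoing and
# overridesummary above only add their extra report when a 'large' option is
# present, i.e. when the user runs 'hg outgoing --large' or 'hg summary
# --large'.  That flag is added where the commands are wrapped, in the
# extension's setup code; the snippet below shows the usual shape of such a
# registration and is an assumption, not a copy of that code (including the
# help text).
def _examplewrapsummary():
    from mercurial import extensions
    entry = extensions.wrapcommand(commands.table, 'summary', overridesummary)
    entry[1].append(('', 'large', None,
                     _('display largefiles waiting to be uploaded')))
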
def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
                     similarity=None):
    if not lfutil.islfilesrepo(repo):
        return orig(repo, pats, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
                          False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Call into the normal remove code, but leave the removal of the standin
    # itself to the original addremove.  Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if missing:
        m = [repo.wjoin(f) for f in missing]
        repo._isaddremove = True
        removelargefiles(repo.ui, repo, *m, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    addlargefiles(repo.ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest.  Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(repo, pats, opts, dry_run, similarity)
    restorematchfn()
    return result

# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX large file status is buggy when used on repo proxy.
    # XXX this needs to be investigated.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus

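# Added commentary (not part of the original module): to the unmodified status
# code a checked-out largefile looks like an unknown file, because only its
# standin is tracked; the lfdirstate consulted above is what records the
# largefile itself as tracked.  A hypothetical helper expressing the same
# check used in the filter above:
def _ispurgeable(ui, repo, f):
    # only files the largefiles dirstate does not know about ('?') are truly
    # unknown and therefore safe for purge to delete
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    return lfdirstate[f] == '?'
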
def overriderollback(orig, ui, repo, **opts):
    result = orig(ui, repo, **opts)
    merge.update(repo, node=None, branchmerge=False, force=True,
                 partial=lfutil.isstandin)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = lfutil.listlfiles(repo)
        oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
        for file in lfiles:
            if file in oldlfiles:
                lfdirstate.normallookup(file)
            else:
                lfdirstate.add(file)
        lfdirstate.write()
    finally:
        wlock.release()
    return result

def overridetransplant(orig, ui, repo, *revs, **opts):
    try:
        oldstandins = lfutil.getstandinsstate(repo)
        repo._istransplanting = True
        result = orig(ui, repo, *revs, **opts)
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=True)
    finally:
        repo._istransplanting = False
    return result

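# Illustrative sketch (not part of the original module): hgupdaterepo and
# overridetransplant above share a pattern -- snapshot the standins before the
# wrapped operation, snapshot them again afterwards, and only refresh the
# largefiles whose standins actually changed.  A hypothetical wrapper for that
# idiom, built from the helpers already used above:
def _updatechangedlfiles(repo, operation):
    oldstandins = lfutil.getstandinsstate(repo)
    result = operation()
    newstandins = lfutil.getstandinsstate(repo)
    changed = lfutil.getlfilestoupdate(oldstandins, newstandins)
    lfcommands.updatelfiles(repo.ui, repo, filelist=changed)
    return result
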
def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        lf = lfutil.splitstandin(f)
        if lf is None:
            return origmatchfn(f)
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    m.bad = lambda f, msg: f not in notbad
    for f in ctx.walk(m):
        lf = lfutil.splitstandin(f)
        if lf is None:
            err = orig(ui, repo, f, **opts)
        else:
            err = lfcommands.catlfile(repo, lf, ctx.rev(), opts.get('output'))
    return err

def mercurialsinkbefore(orig, sink):
    sink.repo._isconverting = True
    orig(sink)

def mercurialsinkafter(orig, sink):
    sink.repo._isconverting = False
    orig(sink)

@@ -1,716 +1,724 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, nullrev, hex, bin
from i18n import _
import error, util, filemerge, copies, subrepo, worker
import errno, os, shutil

class mergestate(object):
    '''track 3-way merge state of individual files'''
    def __init__(self, repo):
        self._repo = repo
        self._dirty = False
        self._read()
    def reset(self, node=None):
        self._state = {}
        if node:
            self._local = node
        shutil.rmtree(self._repo.join("merge"), True)
        self._dirty = False
    def _read(self):
        self._state = {}
        try:
            f = self._repo.opener("merge/state")
            for i, l in enumerate(f):
                if i == 0:
                    self._local = bin(l[:-1])
                else:
                    bits = l[:-1].split("\0")
                    self._state[bits[0]] = bits[1:]
            f.close()
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
        self._dirty = False
    def commit(self):
        if self._dirty:
            f = self._repo.opener("merge/state", "w")
            f.write(hex(self._local) + "\n")
            for d, v in self._state.iteritems():
                f.write("\0".join([d] + v) + "\n")
            f.close()
            self._dirty = False
    def add(self, fcl, fco, fca, fd):
        hash = util.sha1(fcl.path()).hexdigest()
        self._repo.opener.write("merge/" + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(), fca.path(),
                           hex(fca.filenode()), fco.path(), fcl.flags()]
        self._dirty = True
    def __contains__(self, dfile):
        return dfile in self._state
    def __getitem__(self, dfile):
        return self._state[dfile][0]
    def __iter__(self):
        l = self._state.keys()
        l.sort()
        for f in l:
            yield f
    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True
    def resolve(self, dfile, wctx, octx):
        if self[dfile] == 'r':
            return 0
        state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
                                   afile)
            elif flags == fla:
                flags = flo
        # restore local
        f = self._repo.opener("merge/" + hash)
        self._repo.wwrite(dfile, f.read(), flags)
        f.close()
        r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
        if r is None:
            # no real conflict
            del self._state[dfile]
        elif not r:
            self.mark(dfile, 'r')
        return r

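# Illustrative sketch (not part of the original module): given the _read() and
# commit() methods above, the on-disk ".hg/merge/state" file is a very simple
# format -- the first line is the hex of the local node, and every following
# line is one NUL-separated record laid out exactly as add() stores it.  A
# standalone parser for that layout, written only from the code above:
def _parsemergestate(data):
    lines = data.splitlines()
    localnode = lines[0]
    records = {}
    for line in lines[1:]:
        bits = line.split("\0")
        # fields follow mergestate.add(): state, hash, local path,
        # ancestor path, ancestor filenode (hex), other path, flags
        records[bits[0]] = bits[1:]
    return localnode, records
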
def _checkunknownfile(repo, wctx, mctx, f):
    return (not repo.dirstate._ignore(f)
        and os.path.isfile(repo.wjoin(f))
        and repo.dirstate.normalize(f) not in repo.dirstate
        and mctx[f].cmp(wctx[f]))

def _checkunknown(repo, wctx, mctx):
    "check for collisions between unknown files and files in mctx"

    error = False
    for f in mctx:
        if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
            error = True
            wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
    if error:
        raise util.Abort(_("untracked files in working directory differ "
                           "from files in requested revision"))

def _remains(f, m, ma, workingctx=False):
    """check whether specified file remains after merge.

    It is assumed that specified file is not contained in the manifest
    of the other context.
    """
    if f in ma:
        n = m[f]
        if n != ma[f]:
            return True # because it is changed locally
            # even though it doesn't remain, if "remote deleted" is
            # chosen in manifestmerge()
        elif workingctx and n[20:] == "a":
            return True # because it is added locally (linear merge specific)
        else:
            return False # because it is removed remotely
    else:
        return True # because it is added locally

def _checkcollision(mctx, extractxs):
    "check for case folding collisions in the destination context"
    folded = {}
    for fn in mctx:
        fold = util.normcase(fn)
        if fold in folded:
            raise util.Abort(_("case-folding collision between %s and %s")
                             % (fn, folded[fold]))
        folded[fold] = fn

    if extractxs:
        wctx, actx = extractxs
        # class to delay looking up copy mapping
        class pathcopies(object):
            @util.propertycache
            def map(self):
                # {dst@mctx: src@wctx} copy mapping
                return copies.pathcopies(wctx, mctx)
        pc = pathcopies()

        for fn in wctx:
            fold = util.normcase(fn)
            mfn = folded.get(fold, None)
            if (mfn and mfn != fn and pc.map.get(mfn) != fn and
                _remains(fn, wctx.manifest(), actx.manifest(), True) and
                _remains(mfn, mctx.manifest(), actx.manifest())):
                raise util.Abort(_("case-folding collision between %s and %s")
                                 % (mfn, fn))

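# Illustrative sketch (not part of the original module): the collision check
# above keys every filename by util.normcase(), which folds names the way the
# local filesystem would (e.g. lower-casing on case-insensitive systems).  Two
# distinct manifest entries that fold to the same key therefore cannot coexist
# in one working directory; the file names below are made-up examples.
def _examplecollision():
    folded = {}
    for fn in ['README.txt', 'readme.TXT']:
        fold = util.normcase(fn)
        if fold in folded:
            return (folded[fold], fn)   # the colliding pair
        folded[fold] = fn
    return None
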
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = []
    state = branchmerge and 'r' or 'f'
    for f in wctx.deleted():
        if f not in mctx:
            actions.append((f, state, None, "forget deleted"))

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions.append((f, "f", None, "forget removed"))

    return actions

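# Added commentary (not part of the original module): every entry appended to
# the 'actions' lists built by _forgetremoved above and manifestmerge below is
# a tuple (filename, action code, arguments, message).  The codes appearing in
# the code shown here are: "g" get the remote version, "m" merge the two
# versions, "r" remove, "f" forget, "e" update flags only, "d" handle a
# directory rename, "dr"/"rd" report divergent or renamed-and-deleted files,
# with change/delete prompts collected separately as "cd".  A made-up example
# of the resulting list:
_EXAMPLE_ACTIONS = [
    ('a.txt', 'g', ('',), 'remote is newer'),
    ('b.txt', 'm', ('b.txt', 'b.txt', False), 'versions differ'),
    ('c.txt', 'r', None, 'other deleted'),
]
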
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
                  acceptremote=False):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    partial = function to filter file lists
    acceptremote = accept the incoming changes without prompting
    """

196 | overwrite = force and not branchmerge |
|
198 | overwrite = force and not branchmerge | |
197 | actions, copy, movewithdir = [], {}, {} |
|
199 | actions, copy, movewithdir = [], {}, {} | |
198 |
|
200 | |||
199 | followcopies = False |
|
201 | followcopies = False | |
200 | if overwrite: |
|
202 | if overwrite: | |
201 | pa = wctx |
|
203 | pa = wctx | |
202 | elif pa == p2: # backwards |
|
204 | elif pa == p2: # backwards | |
203 | pa = wctx.p1() |
|
205 | pa = wctx.p1() | |
204 | elif not branchmerge and not wctx.dirty(missing=True): |
|
206 | elif not branchmerge and not wctx.dirty(missing=True): | |
205 | pass |
|
207 | pass | |
206 | elif pa and repo.ui.configbool("merge", "followcopies", True): |
|
208 | elif pa and repo.ui.configbool("merge", "followcopies", True): | |
207 | followcopies = True |
|
209 | followcopies = True | |
208 |
|
210 | |||
209 | # manifests fetched in order are going to be faster, so prime the caches |
|
211 | # manifests fetched in order are going to be faster, so prime the caches | |
210 | [x.manifest() for x in |
|
212 | [x.manifest() for x in | |
211 | sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())] |
|
213 | sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())] | |
212 |
|
214 | |||
213 | if followcopies: |
|
215 | if followcopies: | |
214 | ret = copies.mergecopies(repo, wctx, p2, pa) |
|
216 | ret = copies.mergecopies(repo, wctx, p2, pa) | |
215 | copy, movewithdir, diverge, renamedelete = ret |
|
217 | copy, movewithdir, diverge, renamedelete = ret | |
216 | for of, fl in diverge.iteritems(): |
|
218 | for of, fl in diverge.iteritems(): | |
217 | actions.append((of, "dr", (fl,), "divergent renames")) |
|
219 | actions.append((of, "dr", (fl,), "divergent renames")) | |
218 | for of, fl in renamedelete.iteritems(): |
|
220 | for of, fl in renamedelete.iteritems(): | |
219 | actions.append((of, "rd", (fl,), "rename and delete")) |
|
221 | actions.append((of, "rd", (fl,), "rename and delete")) | |
220 |
|
222 | |||
221 | repo.ui.note(_("resolving manifests\n")) |
|
223 | repo.ui.note(_("resolving manifests\n")) | |
222 | repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n" |
|
224 | repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n" | |
223 | % (bool(branchmerge), bool(force), bool(partial))) |
|
225 | % (bool(branchmerge), bool(force), bool(partial))) | |
224 | repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2)) |
|
226 | repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2)) | |
225 |
|
227 | |||
226 | m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest() |
|
228 | m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest() | |
227 | copied = set(copy.values()) |
|
229 | copied = set(copy.values()) | |
228 | copied.update(movewithdir.values()) |
|
230 | copied.update(movewithdir.values()) | |
229 |
|
231 | |||
230 | if '.hgsubstate' in m1: |
|
232 | if '.hgsubstate' in m1: | |
231 | # check whether sub state is modified |
|
233 | # check whether sub state is modified | |
232 | for s in sorted(wctx.substate): |
|
234 | for s in sorted(wctx.substate): | |
233 | if wctx.sub(s).dirty(): |
|
235 | if wctx.sub(s).dirty(): | |
234 | m1['.hgsubstate'] += "+" |
|
236 | m1['.hgsubstate'] += "+" | |
235 | break |
|
237 | break | |
236 |
|
238 | |||
237 | aborts, prompts = [], [] |
|
239 | aborts, prompts = [], [] | |
238 | # Compare manifests |
|
240 | # Compare manifests | |
239 | for f, n in m1.iteritems(): |
|
241 | for f, n in m1.iteritems(): | |
240 | if partial and not partial(f): |
|
242 | if partial and not partial(f): | |
241 | continue |
|
243 | continue | |
242 | if f in m2: |
|
244 | if f in m2: | |
243 | n2 = m2[f] |
|
245 | n2 = m2[f] | |
244 | fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) |
|
246 | fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) | |
245 | nol = 'l' not in fl1 + fl2 + fla |
|
247 | nol = 'l' not in fl1 + fl2 + fla | |
246 | a = ma.get(f, nullid) |
|
248 | a = ma.get(f, nullid) | |
247 | if n == n2 and fl1 == fl2: |
|
249 | if n == n2 and fl1 == fl2: | |
248 | pass # same - keep local |
|
250 | pass # same - keep local | |
249 | elif n2 == a and fl2 == fla: |
|
251 | elif n2 == a and fl2 == fla: | |
250 | pass # remote unchanged - keep local |
|
252 | pass # remote unchanged - keep local | |
251 | elif n == a and fl1 == fla: # local unchanged - use remote |
|
253 | elif n == a and fl1 == fla: # local unchanged - use remote | |
252 | if n == n2: # optimization: keep local content |
|
254 | if n == n2: # optimization: keep local content | |
253 | actions.append((f, "e", (fl2,), "update permissions")) |
|
255 | actions.append((f, "e", (fl2,), "update permissions")) | |
254 | else: |
|
256 | else: | |
255 | actions.append((f, "g", (fl2,), "remote is newer")) |
|
257 | actions.append((f, "g", (fl2,), "remote is newer")) | |
256 | elif nol and n2 == a: # remote only changed 'x' |
|
258 | elif nol and n2 == a: # remote only changed 'x' | |
257 | actions.append((f, "e", (fl2,), "update permissions")) |
|
259 | actions.append((f, "e", (fl2,), "update permissions")) | |
258 | elif nol and n == a: # local only changed 'x' |
|
260 | elif nol and n == a: # local only changed 'x' | |
259 | actions.append((f, "g", (fl1,), "remote is newer")) |
|
261 | actions.append((f, "g", (fl1,), "remote is newer")) | |
260 | else: # both changed something |
|
262 | else: # both changed something | |
261 | actions.append((f, "m", (f, f, False), "versions differ")) |
|
263 | actions.append((f, "m", (f, f, False), "versions differ")) | |
262 | elif f in copied: # files we'll deal with on m2 side |
|
264 | elif f in copied: # files we'll deal with on m2 side | |
263 | pass |
|
265 | pass | |
264 | elif f in movewithdir: # directory rename |
|
266 | elif f in movewithdir: # directory rename | |
265 | f2 = movewithdir[f] |
|
267 | f2 = movewithdir[f] | |
266 | actions.append((f, "d", (None, f2, m1.flags(f)), |
|
268 | actions.append((f, "d", (None, f2, m1.flags(f)), | |
267 | "remote renamed directory to " + f2)) |
|
269 | "remote renamed directory to " + f2)) | |
268 | elif f in copy: |
|
270 | elif f in copy: | |
269 | f2 = copy[f] |
|
271 | f2 = copy[f] | |
270 | actions.append((f, "m", (f2, f, False), |
|
272 | actions.append((f, "m", (f2, f, False), | |
271 | "local copied/moved to " + f2)) |
|
273 | "local copied/moved to " + f2)) | |
272 | elif f in ma: # clean, a different, no remote |
|
274 | elif f in ma: # clean, a different, no remote | |
273 | if n != ma[f]: |
|
275 | if n != ma[f]: | |
274 | prompts.append((f, "cd")) # prompt changed/deleted |
|
276 | prompts.append((f, "cd")) # prompt changed/deleted | |
275 | elif n[20:] == "a": # added, no remote |
|
277 | elif n[20:] == "a": # added, no remote | |
276 | actions.append((f, "f", None, "remote deleted")) |
|
278 | actions.append((f, "f", None, "remote deleted")) | |
277 | else: |
|
279 | else: | |
278 | actions.append((f, "r", None, "other deleted")) |
|
280 | actions.append((f, "r", None, "other deleted")) | |
279 |
|
281 | |||
    for f, n in m2.iteritems():
        if partial and not partial(f):
            continue
        if f in m1 or f in copied: # files already visited
            continue
        if f in movewithdir:
            f2 = movewithdir[f]
            actions.append((None, "d", (f, f2, m2.flags(f)),
                            "local renamed directory to " + f2))
        elif f in copy:
            f2 = copy[f]
            if f2 in m2:
                actions.append((f2, "m", (f, f, False),
                                "remote copied to " + f))
            else:
                actions.append((f2, "m", (f, f, True),
                                "remote moved to " + f))
        elif f not in ma:
            # local unknown, remote created: the logic is described by the
            # following table:
            #
            # force  branchmerge  different  |  action
            #   n         *           n      |    get
            #   n         *           y      |   abort
            #   y         n           *      |    get
            #   y         y           n      |    get
            #   y         y           y      |   merge
            #
            # Checking whether the files are different is expensive, so we
            # don't do that when we can avoid it.
            if force and not branchmerge:
                actions.append((f, "g", (m2.flags(f),), "remote created"))
            else:
                different = _checkunknownfile(repo, wctx, p2, f)
                if force and branchmerge and different:
                    actions.append((f, "m", (f, f, False),
                                    "remote differs from untracked local"))
                elif not force and different:
                    aborts.append((f, "ud"))
                else:
                    actions.append((f, "g", (m2.flags(f),), "remote created"))
        elif n != ma[f]:
            prompts.append((f, "dc")) # prompt deleted/changed

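# Illustrative sketch only (not part of merge.py): the decision table above,
# written out as a standalone function so each branch is easy to check.
# 'force', 'branchmerge' and 'different' are plain booleans here; the real
# code also avoids computing 'different' when the answer cannot matter,
# because that check is expensive.
def _unknown_remote_created(force, branchmerge, different):
    if not force:
        return "abort" if different else "get"
    if not branchmerge:
        return "get"
    return "merge" if different else "get"

# A few spot checks against the table:
assert _unknown_remote_created(False, True, False) == "get"
assert _unknown_remote_created(False, False, True) == "abort"
assert _unknown_remote_created(True, True, True) == "merge"
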
    for f, m in sorted(aborts):
        if m == "ud":
            repo.ui.warn(_("%s: untracked file differs\n") % f)
        else: assert False, m
    if aborts:
        raise util.Abort(_("untracked files in working directory differ "
                           "from files in requested revision"))

    for f, m in sorted(prompts):
        if m == "cd":
            if acceptremote:
                actions.append((f, "r", None, "remote delete"))
            elif repo.ui.promptchoice(
                _("local changed %s which remote deleted\n"
                  "use (c)hanged version or (d)elete?") % f,
                (_("&Changed"), _("&Delete")), 0):
                actions.append((f, "r", None, "prompt delete"))
            else:
                actions.append((f, "a", None, "prompt keep"))
        elif m == "dc":
            if acceptremote:
                actions.append((f, "g", (m2.flags(f),), "remote recreating"))
            elif repo.ui.promptchoice(
                _("remote changed %s which local deleted\n"
                  "use (c)hanged version or leave (d)eleted?") % f,
                (_("&Changed"), _("&Deleted")), 0) == 0:
                actions.append((f, "g", (m2.flags(f),), "prompt recreating"))
        else: assert False, m
    return actions

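# Illustrative sketch only: the shape of the list manifestmerge() returns.
# Every entry is (filename, action code, code-specific args, human-readable
# message); the sample data below is made up, only the tuple layout mirrors
# the code above.
_sample_actions = [
    ("a.txt", "g", ("",), "remote created"),                       # get
    ("b.txt", "m", ("b.txt", "b.txt", False), "versions differ"),  # merge
    ("c.txt", "r", None, "other deleted"),                         # remove
]
# Tallying by action code, e.g. to see how many merges will be needed:
_counts = {}
for _f, _code, _args, _msg in _sample_actions:
    _counts[_code] = _counts.get(_code, 0) + 1
# _counts == {'g': 1, 'm': 1, 'r': 1}
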
def actionkey(a):
    return a[1] == "r" and -1 or 0, a

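# Illustrative sketch only: actionkey() exists so that, when the action list
# is sorted (see applyupdates() below), "r" (remove) actions come first.
# This assumes the actionkey() defined directly above; the tuples are made up.
_demo = [
    ("new.txt", "g", ("",), "remote created"),
    ("old.txt", "r", None, "other deleted"),
]
_demo.sort(key=actionkey)
# After sorting the remove comes first:
# _demo[0] == ("old.txt", "r", None, "other deleted")
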
def getremove(repo, mctx, overwrite, args):
    """apply usually-non-interactive updates to the working directory

    mctx is the context to be merged into the working copy

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    fctx = mctx.filectx
    wwrite = repo.wwrite
    audit = repo.wopener.audit
    i = 0
    for arg in args:
        f = arg[0]
        if arg[1] == 'r':
            if verbose:
                repo.ui.note(_("removing %s\n") % f)
            audit(f)
            try:
                unlink(wjoin(f), ignoremissing=True)
            except OSError, inst:
                repo.ui.warn(_("update failed to remove %s: %s!\n") %
                             (f, inst.strerror))
        else:
            if verbose:
                repo.ui.note(_("getting %s\n") % f)
            wwrite(f, fctx(f).data(), arg[2][0])
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

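# Illustrative sketch only: the same batched-progress pattern getremove()
# uses, separated from any repository access. Progress is yielded as
# (count, last_item) roughly once per batch plus once for the remainder.
# '_batched_progress' and 'batchsize' are names invented for this sketch.
def _batched_progress(items, batchsize=100):
    i = 0
    for item in items:
        # ... the real code removes or writes the file here ...
        if i == batchsize:
            yield i, item
            i = 0
        i += 1
    if i > 0:
        yield i, item

# e.g. list(_batched_progress(range(5), batchsize=2)) yields three tuples.
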
def applyupdates(repo, actions, wctx, mctx, actx, overwrite):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy
    actx is the context of the common ancestor

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node())
    moves = []
    actions.sort(key=actionkey)

    # prescan for merges
    for a in actions:
        f, m, args, msg = a
        repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
        if m == "m": # merge
            f2, fd, move = args
            if fd == '.hgsubstate': # merged internally
                continue
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f, fd))
            fcl = wctx[f]
            fco = mctx[f2]
            if mctx == actx: # backwards, use working dir parent as ancestor
                if fcl.parents():
                    fca = fcl.p1()
                else:
                    fca = repo.filectx(f, fileid=nullrev)
            else:
                fca = fcl.ancestor(fco, actx)
            if not fca:
                fca = repo.filectx(f, fileid=nullrev)
            ms.add(fcl, fco, fca, fd)
            if f != fd and move:
                moves.append(f)

    audit = repo.wopener.audit

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    numupdates = len(actions)
    workeractions = [a for a in actions if a[1] in 'gr']
    updated = len([a for a in workeractions if a[1] == 'g'])
    removed = len([a for a in workeractions if a[1] == 'r'])
    actions = [a for a in actions if a[1] not in 'gr']

    hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
    if hgsub and hgsub[0] == 'r':
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    z = 0
    prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
                         workeractions)
    for i, item in prog:
        z += i
        repo.ui.progress(_('updating'), z, item=item, total=numupdates,
                         unit=_('files'))

    if hgsub and hgsub[0] == 'g':
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    for i, a in enumerate(actions):
        f, m, args, msg = a
        progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
        if m == "m": # merge
            if fd == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                 overwrite)
                continue
            f2, fd, move = args
            audit(fd)
            r = ms.resolve(fd, wctx, mctx)
            if r is not None and r > 0:
                unresolved += 1
            else:
                if r is None:
                    updated += 1
                else:
                    merged += 1
        elif m == "d": # directory rename
            f2, fd, flags = args
            if f:
                repo.ui.note(_("moving %s to %s\n") % (f, fd))
                audit(f)
                repo.wwrite(fd, wctx.filectx(f).data(), flags)
                util.unlinkpath(repo.wjoin(f))
            if f2:
                repo.ui.note(_("getting %s to %s\n") % (f2, fd))
                repo.wwrite(fd, mctx.filectx(f2).data(), flags)
            updated += 1
        elif m == "dr": # divergent renames
            fl, = args
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "rd": # rename and delete
            fl, = args
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "e": # exec
            flags, = args
            audit(f)
            util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
            updated += 1
    ms.commit()
    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved

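# Illustrative sketch only: turning the (updated, merged, removed, unresolved)
# tuple returned above into the kind of one-line summary hg prints after an
# update. '_summarize' is a name invented for this sketch, not hg's own
# output code.
def _summarize(stats):
    updated, merged, removed, unresolved = stats
    return ("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved"
            % (updated, merged, removed, unresolved))

# _summarize((3, 0, 0, 0)) starts with '3 files updated, 0 files merged, ...'
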
def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial,
                     acceptremote=False):
    "Calculate the actions needed to merge mctx into tctx"
    actions = []
    folding = not util.checkcase(repo.path)
    if folding:
        # collision check is not needed for clean update
        if (not branchmerge and
            (force or not tctx.dirty(missing=True, branch=False))):
            _checkcollision(mctx, None)
        else:
            _checkcollision(mctx, (tctx, ancestor))
    actions += manifestmerge(repo, tctx, mctx,
                             ancestor,
                             branchmerge, force,
                             partial, acceptremote)
    if tctx.rev() is None:
        actions += _forgetremoved(tctx, mctx, branchmerge)
    return actions

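# Illustrative sketch only: a simplified rendering of how acceptremote
# changes the "changed/deleted" and "deleted/changed" prompt handling in
# manifestmerge() above. 'askuser' stands in for repo.ui.promptchoice and
# the flags in the returned actions are omitted; nothing here touches a
# real repository.
def _resolve_prompt(kind, acceptremote, askuser):
    if kind == "cd":                      # local changed, remote deleted
        if acceptremote:
            return ("r", "remote delete")
        return ("r", "prompt delete") if askuser() else ("a", "prompt keep")
    if kind == "dc":                      # remote changed, local deleted
        if acceptremote:
            return ("g", "remote recreating")
        return ("g", "prompt recreating") if askuser() == 0 else None
    raise ValueError(kind)

# With acceptremote=True no question is asked at all:
assert _resolve_prompt("cd", True, askuser=None) == ("r", "remote delete")
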
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"

    for a in actions:
        f, m, args, msg = a
        if m == "r": # remove
            if branchmerge:
                repo.dirstate.remove(f)
            else:
                repo.dirstate.drop(f)
        elif m == "a": # re-add
            if not branchmerge:
                repo.dirstate.add(f)
        elif m == "f": # forget
            repo.dirstate.drop(f)
        elif m == "e": # exec change
            repo.dirstate.normallookup(f)
        elif m == "g": # get
            if branchmerge:
                repo.dirstate.otherparent(f)
            else:
                repo.dirstate.normal(f)
        elif m == "m": # merge
            f2, fd, move = args
            if branchmerge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                repo.dirstate.merge(fd)
                if f != f2: # copy/rename
                    if move:
                        repo.dirstate.remove(f)
                    if f != fd:
                        repo.dirstate.copy(f, fd)
                    else:
                        repo.dirstate.copy(f2, fd)
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                if f2 == fd: # file not locally copied/moved
                    repo.dirstate.normallookup(fd)
                if move:
                    repo.dirstate.drop(f)
        elif m == "d": # directory rename
            f2, fd, flag = args
            if not f2 and f not in repo.dirstate:
                # untracked file moved
                continue
            if branchmerge:
                repo.dirstate.add(fd)
                if f:
                    repo.dirstate.remove(f)
                    repo.dirstate.copy(f, fd)
                if f2:
                    repo.dirstate.copy(f2, fd)
            else:
                repo.dirstate.normal(fd)
                if f:
                    repo.dirstate.drop(f)

def update(repo, node, branchmerge, force, partial, ancestor=None,
           mergeancestor=False):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c  -C  dirty  rev  |  linear    same    cross
     n   n    n     n   |    ok       (1)      x
     n   n    n     y   |    ok       ok       ok
     n   n    y     *   |  merge      (2)     (2)
     n   y    *     *   |   ---    discard    ---
     y   n    y     *   |   ---       (3)     ---
     y   n    n     *   |   ---       ok      ---
     y   y    *     *   |   ---       (4)     ---

    x = can't happen
    * = don't-care
    1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
    2 = abort: crosses branches (use 'hg merge' to merge or
                 use 'hg update -C' to discard changes)
    3 = abort: uncommitted local changes
    4 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        if node is None:
            # tip of current branch
            try:
                node = repo.branchtip(wc.branch())
            except error.RepoLookupError:
                if wc.branch() == "default": # no default branch!
                    node = repo.lookup("tip") # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())
        overwrite = force and not branchmerge
        pl = wc.parents()
        p1, p2 = pl[0], repo[node]
        if ancestor:
            pa = repo[ancestor]
        else:
            pa = p1.ancestor(p2)

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merges"))
        if branchmerge:
            if pa == p2:
                raise util.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pa == p1:
                if not mergeancestor and p1.branch() == p2.branch():
                    raise util.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("outstanding uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                if wc.sub(s).dirty():
                    raise util.Abort(_("outstanding uncommitted changes in "
                                       "subrepository '%s'") % s)

        elif not overwrite:
            if pa == p1 or pa == p2: # linear
                pass # all good
            elif wc.dirty(missing=True):
                raise util.Abort(_("crosses branches (merge branches or use"
                                   " --clean to discard changes)"))
            elif onode is None:
                raise util.Abort(_("crosses branches (merge branches or update"
                                   " --check to force update)"))
            else:
                # Allow jumping branches if clean and specific rev given
                pa = p1

        ### calculate phase
        actions = calculateupdates(repo, wc, p2, pa,
                                   branchmerge, force, partial, mergeancestor)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)

        stats = applyupdates(repo, actions, wc, p2, pa, overwrite)

        if not partial:
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)
            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
    finally:
        wlock.release()

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
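
# Illustrative sketch only (not part of merge.py): the shape of a call a
# caller might make into update(). 'repo' is assumed to be a localrepository
# and 'node' a changeset to check out; this is a plain non-merging update,
# not the exact code of any hg command.
def _example_checkout(repo, node):
    stats = update(repo, node, branchmerge=False, force=False, partial=None)
    # stats is the (updated, merged, removed, unresolved) tuple
    return stats

# A rebase-style caller would instead pass branchmerge=True together with
# mergeancestor=True, so that prompts are answered in favour of the incoming
# changes, as described in the docstring above.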
@@ -1,724 +1,748 @@
  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > graphlog=
  > rebase=
  > mq=
  >
  > [phases]
  > publish=False
  >
  > [alias]
  > tglog = log -G --template "{rev}: '{desc}' {branches}\n"
  > tglogp = log -G --template "{rev}:{phase} '{desc}' {branches}\n"
  > EOF

Create repo a:

  $ hg init a
  $ cd a
  $ hg unbundle "$TESTDIR/bundles/rebase.hg"
  adding changesets
  adding manifests
  adding file changes
  added 8 changesets with 7 changes to 7 files (+2 heads)
  (run 'hg heads' to see heads, 'hg merge' to merge)
  $ hg up tip
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ hg tglog
  @  7: 'H'
  |
  | o  6: 'G'
  |/|
  o |  5: 'F'
  | |
  | o  4: 'E'
  |/
  | o  3: 'D'
  | |
  | o  2: 'C'
  | |
  | o  1: 'B'
  |/
  o  0: 'A'

  $ cd ..


48 | Rebasing B onto H and collapsing changesets with different phases: |
|
48 | Rebasing B onto H and collapsing changesets with different phases: | |
49 |
|
49 | |||
50 |
|
50 | |||
51 | $ hg clone -q -u 3 a a1 |
|
51 | $ hg clone -q -u 3 a a1 | |
52 | $ cd a1 |
|
52 | $ cd a1 | |
53 |
|
53 | |||
54 | $ hg phase --force --secret 3 |
|
54 | $ hg phase --force --secret 3 | |
55 |
|
55 | |||
56 | $ hg rebase --collapse --keepbranches |
|
56 | $ hg rebase --collapse --keepbranches | |
57 | saved backup bundle to $TESTTMP/a1/.hg/strip-backup/*-backup.hg (glob) |
|
57 | saved backup bundle to $TESTTMP/a1/.hg/strip-backup/*-backup.hg (glob) | |
58 |
|
58 | |||
59 | $ hg tglogp |
|
59 | $ hg tglogp | |
60 | @ 5:secret 'Collapsed revision |
|
60 | @ 5:secret 'Collapsed revision | |
61 | | * B |
|
61 | | * B | |
62 | | * C |
|
62 | | * C | |
63 | | * D' |
|
63 | | * D' | |
64 | o 4:draft 'H' |
|
64 | o 4:draft 'H' | |
65 | | |
|
65 | | | |
66 | | o 3:draft 'G' |
|
66 | | o 3:draft 'G' | |
67 | |/| |
|
67 | |/| | |
68 | o | 2:draft 'F' |
|
68 | o | 2:draft 'F' | |
69 | | | |
|
69 | | | | |
70 | | o 1:draft 'E' |
|
70 | | o 1:draft 'E' | |
71 | |/ |
|
71 | |/ | |
72 | o 0:draft 'A' |
|
72 | o 0:draft 'A' | |
73 |
|
73 | |||
74 | $ hg manifest |
|
74 | $ hg manifest | |
75 | A |
|
75 | A | |
76 | B |
|
76 | B | |
77 | C |
|
77 | C | |
78 | D |
|
78 | D | |
79 | F |
|
79 | F | |
80 | H |
|
80 | H | |
81 |
|
81 | |||
82 | $ cd .. |
|
82 | $ cd .. | |
83 |
|
83 | |||
84 |
|
84 | |||
85 | Rebasing E onto H: |
|
85 | Rebasing E onto H: | |
86 |
|
86 | |||
87 | $ hg clone -q -u . a a2 |
|
87 | $ hg clone -q -u . a a2 | |
88 | $ cd a2 |
|
88 | $ cd a2 | |
89 |
|
89 | |||
90 | $ hg phase --force --secret 6 |
|
90 | $ hg phase --force --secret 6 | |
91 | $ hg rebase --source 4 --collapse |
|
91 | $ hg rebase --source 4 --collapse | |
92 | saved backup bundle to $TESTTMP/a2/.hg/strip-backup/*-backup.hg (glob) |
|
92 | saved backup bundle to $TESTTMP/a2/.hg/strip-backup/*-backup.hg (glob) | |
93 |
|
93 | |||
94 | $ hg tglog |
|
94 | $ hg tglog | |
95 | @ 6: 'Collapsed revision |
|
95 | @ 6: 'Collapsed revision | |
96 | | * E |
|
96 | | * E | |
97 | | * G' |
|
97 | | * G' | |
98 | o 5: 'H' |
|
98 | o 5: 'H' | |
99 | | |
|
99 | | | |
100 | o 4: 'F' |
|
100 | o 4: 'F' | |
101 | | |
|
101 | | | |
102 | | o 3: 'D' |
|
102 | | o 3: 'D' | |
103 | | | |
|
103 | | | | |
104 | | o 2: 'C' |
|
104 | | o 2: 'C' | |
105 | | | |
|
105 | | | | |
106 | | o 1: 'B' |
|
106 | | o 1: 'B' | |
107 | |/ |
|
107 | |/ | |
108 | o 0: 'A' |
|
108 | o 0: 'A' | |
109 |
|
109 | |||
110 | $ hg manifest |
|
110 | $ hg manifest | |
111 | A |
|
111 | A | |
112 | E |
|
112 | E | |
113 | F |
|
113 | F | |
114 | H |
|
114 | H | |
115 |
|
115 | |||
116 | $ cd .. |
|
116 | $ cd .. | |
117 |
|
117 | |||
118 | Rebasing G onto H with custom message: |
|
118 | Rebasing G onto H with custom message: | |
119 |
|
119 | |||
120 | $ hg clone -q -u . a a3 |
|
120 | $ hg clone -q -u . a a3 | |
121 | $ cd a3 |
|
121 | $ cd a3 | |
122 |
|
122 | |||
123 | $ hg rebase --base 6 -m 'custom message' |
|
123 | $ hg rebase --base 6 -m 'custom message' | |
124 | abort: message can only be specified with collapse |
|
124 | abort: message can only be specified with collapse | |
125 | [255] |
|
125 | [255] | |
126 |
|
126 | |||
127 | $ hg rebase --source 4 --collapse -m 'custom message' |
|
127 | $ hg rebase --source 4 --collapse -m 'custom message' | |
128 | saved backup bundle to $TESTTMP/a3/.hg/strip-backup/*-backup.hg (glob) |
|
128 | saved backup bundle to $TESTTMP/a3/.hg/strip-backup/*-backup.hg (glob) | |
129 |
|
129 | |||
130 | $ hg tglog |
|
130 | $ hg tglog | |
131 | @ 6: 'custom message' |
|
131 | @ 6: 'custom message' | |
132 | | |
|
132 | | | |
133 | o 5: 'H' |
|
133 | o 5: 'H' | |
134 | | |
|
134 | | | |
135 | o 4: 'F' |
|
135 | o 4: 'F' | |
136 | | |
|
136 | | | |
137 | | o 3: 'D' |
|
137 | | o 3: 'D' | |
138 | | | |
|
138 | | | | |
139 | | o 2: 'C' |
|
139 | | o 2: 'C' | |
140 | | | |
|
140 | | | | |
141 | | o 1: 'B' |
|
141 | | o 1: 'B' | |
142 | |/ |
|
142 | |/ | |
143 | o 0: 'A' |
|
143 | o 0: 'A' | |
144 |
|
144 | |||
145 | $ hg manifest |
|
145 | $ hg manifest | |
146 | A |
|
146 | A | |
147 | E |
|
147 | E | |
148 | F |
|
148 | F | |
149 | H |
|
149 | H | |
150 |
|
150 | |||
151 | $ cd .. |
|
151 | $ cd .. | |
152 |
|
152 | |||
153 | Create repo b: |
|
153 | Create repo b: | |
154 |
|
154 | |||
155 | $ hg init b |
|
155 | $ hg init b | |
156 | $ cd b |
|
156 | $ cd b | |
157 |
|
157 | |||
158 | $ echo A > A |
|
158 | $ echo A > A | |
159 | $ hg ci -Am A |
|
159 | $ hg ci -Am A | |
160 | adding A |
|
160 | adding A | |
161 | $ echo B > B |
|
161 | $ echo B > B | |
162 | $ hg ci -Am B |
|
162 | $ hg ci -Am B | |
163 | adding B |
|
163 | adding B | |
164 |
|
164 | |||
165 | $ hg up -q 0 |
|
165 | $ hg up -q 0 | |
166 |
|
166 | |||
167 | $ echo C > C |
|
167 | $ echo C > C | |
168 | $ hg ci -Am C |
|
168 | $ hg ci -Am C | |
169 | adding C |
|
169 | adding C | |
170 | created new head |
|
170 | created new head | |
171 |
|
171 | |||
172 | $ hg merge |
|
172 | $ hg merge | |
173 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
173 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
174 | (branch merge, don't forget to commit) |
|
174 | (branch merge, don't forget to commit) | |
175 |
|
175 | |||
176 | $ echo D > D |
|
176 | $ echo D > D | |
177 | $ hg ci -Am D |
|
177 | $ hg ci -Am D | |
178 | adding D |
|
178 | adding D | |
179 |
|
179 | |||
180 | $ hg up -q 1 |
|
180 | $ hg up -q 1 | |
181 |
|
181 | |||
182 | $ echo E > E |
|
182 | $ echo E > E | |
183 | $ hg ci -Am E |
|
183 | $ hg ci -Am E | |
184 | adding E |
|
184 | adding E | |
185 | created new head |
|
185 | created new head | |
186 |
|
186 | |||
187 | $ echo F > F |
|
187 | $ echo F > F | |
188 | $ hg ci -Am F |
|
188 | $ hg ci -Am F | |
189 | adding F |
|
189 | adding F | |
190 |
|
190 | |||
191 | $ hg merge |
|
191 | $ hg merge | |
192 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
192 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
193 | (branch merge, don't forget to commit) |
|
193 | (branch merge, don't forget to commit) | |
194 | $ hg ci -m G |
|
194 | $ hg ci -m G | |
195 |
|
195 | |||
196 | $ hg up -q 0 |
|
196 | $ hg up -q 0 | |
197 |
|
197 | |||
198 | $ echo H > H |
|
198 | $ echo H > H | |
199 | $ hg ci -Am H |
|
199 | $ hg ci -Am H | |
200 | adding H |
|
200 | adding H | |
201 | created new head |
|
201 | created new head | |
202 |
|
202 | |||
203 | $ hg tglog |
|
203 | $ hg tglog | |
204 | @ 7: 'H' |
|
204 | @ 7: 'H' | |
205 | | |
|
205 | | | |
206 | | o 6: 'G' |
|
206 | | o 6: 'G' | |
207 | | |\ |
|
207 | | |\ | |
208 | | | o 5: 'F' |
|
208 | | | o 5: 'F' | |
209 | | | | |
|
209 | | | | | |
210 | | | o 4: 'E' |
|
210 | | | o 4: 'E' | |
211 | | | | |
|
211 | | | | | |
212 | | o | 3: 'D' |
|
212 | | o | 3: 'D' | |
213 | | |\| |
|
213 | | |\| | |
214 | | o | 2: 'C' |
|
214 | | o | 2: 'C' | |
215 | |/ / |
|
215 | |/ / | |
216 | | o 1: 'B' |
|
216 | | o 1: 'B' | |
217 | |/ |
|
217 | |/ | |
218 | o 0: 'A' |
|
218 | o 0: 'A' | |
219 |
|
219 | |||
220 | $ cd .. |
|
220 | $ cd .. | |
221 |
|
221 | |||
222 |
|
222 | |||
223 | Rebase and collapse - more than one external (fail): |
|
223 | Rebase and collapse - more than one external (fail): | |
224 |
|
224 | |||
225 | $ hg clone -q -u . b b1 |
|
225 | $ hg clone -q -u . b b1 | |
226 | $ cd b1 |
|
226 | $ cd b1 | |
227 |
|
227 | |||
228 | $ hg rebase -s 2 --collapse |
|
228 | $ hg rebase -s 2 --collapse | |
229 | abort: unable to collapse, there is more than one external parent |
|
229 | abort: unable to collapse, there is more than one external parent | |
230 | [255] |
|
230 | [255] | |
231 |
|
231 | |||
232 | Rebase and collapse - E onto H: |
|
232 | Rebase and collapse - E onto H: | |
233 |
|
233 | |||
234 | $ hg rebase -s 4 --collapse # root (4) is not a merge |
|
234 | $ hg rebase -s 4 --collapse # root (4) is not a merge | |
235 | saved backup bundle to $TESTTMP/b1/.hg/strip-backup/*-backup.hg (glob) |
|
235 | saved backup bundle to $TESTTMP/b1/.hg/strip-backup/*-backup.hg (glob) | |
236 |
|
236 | |||
237 | $ hg tglog |
|
237 | $ hg tglog | |
238 | @ 5: 'Collapsed revision |
|
238 | @ 5: 'Collapsed revision | |
239 | |\ * E |
|
239 | |\ * E | |
240 | | | * F |
|
240 | | | * F | |
241 | | | * G' |
|
241 | | | * G' | |
242 | | o 4: 'H' |
|
242 | | o 4: 'H' | |
243 | | | |
|
243 | | | | |
244 | o | 3: 'D' |
|
244 | o | 3: 'D' | |
245 | |\ \ |
|
245 | |\ \ | |
246 | | o | 2: 'C' |
|
246 | | o | 2: 'C' | |
247 | | |/ |
|
247 | | |/ | |
248 | o / 1: 'B' |
|
248 | o / 1: 'B' | |
249 | |/ |
|
249 | |/ | |
250 | o 0: 'A' |
|
250 | o 0: 'A' | |
251 |
|
251 | |||
252 | $ hg manifest |
|
252 | $ hg manifest | |
253 | A |
|
253 | A | |
254 | C |
|
254 | C | |
255 | D |
|
255 | D | |
256 | E |
|
256 | E | |
257 | F |
|
257 | F | |
258 | H |
|
258 | H | |
259 |
|
259 | |||
260 | $ cd .. |
|
260 | $ cd .. | |
261 |
|
261 | |||
262 |
|
262 | |||
263 |
|
263 | |||
264 |
|
264 | |||
265 | Test that branchheads cache is updated correctly when doing a strip in which |
|
265 | Test that branchheads cache is updated correctly when doing a strip in which | |
266 | the parent of the ancestor node to be stripped does not become a head and also, |
|
266 | the parent of the ancestor node to be stripped does not become a head and also, | |
267 | the parent of a node that is a child of the node stripped becomes a head (node |
|
267 | the parent of a node that is a child of the node stripped becomes a head (node | |
268 | 3). The code is now much simpler and we could just test a simpler scenario |
|
268 | 3). The code is now much simpler and we could just test a simpler scenario | |
269 | We keep it the test this way in case new complexity is injected. |
|
269 | We keep it the test this way in case new complexity is injected. | |
270 |
|
270 | |||
271 | $ hg clone -q -u . b b2 |
|
271 | $ hg clone -q -u . b b2 | |
272 | $ cd b2 |
|
272 | $ cd b2 | |
273 |
|
273 | |||
274 | $ hg heads --template="{rev}:{node} {branch}\n" |
|
274 | $ hg heads --template="{rev}:{node} {branch}\n" | |
275 | 7:c65502d4178782309ce0574c5ae6ee9485a9bafa default |
|
275 | 7:c65502d4178782309ce0574c5ae6ee9485a9bafa default | |
276 | 6:c772a8b2dc17629cec88a19d09c926c4814b12c7 default |
|
276 | 6:c772a8b2dc17629cec88a19d09c926c4814b12c7 default | |
277 |
|
277 | |||
278 | $ cat $TESTTMP/b2/.hg/cache/branchheads-served |
|
278 | $ cat $TESTTMP/b2/.hg/cache/branchheads-served | |
279 | c65502d4178782309ce0574c5ae6ee9485a9bafa 7 |
|
279 | c65502d4178782309ce0574c5ae6ee9485a9bafa 7 | |
280 | c772a8b2dc17629cec88a19d09c926c4814b12c7 default |
|
280 | c772a8b2dc17629cec88a19d09c926c4814b12c7 default | |
281 | c65502d4178782309ce0574c5ae6ee9485a9bafa default |
|
281 | c65502d4178782309ce0574c5ae6ee9485a9bafa default | |
282 |
|
282 | |||
283 | $ hg strip 4 |
|
283 | $ hg strip 4 | |
284 | saved backup bundle to $TESTTMP/b2/.hg/strip-backup/8a5212ebc852-backup.hg (glob) |
|
284 | saved backup bundle to $TESTTMP/b2/.hg/strip-backup/8a5212ebc852-backup.hg (glob) | |
285 |
|
285 | |||
286 | $ cat $TESTTMP/b2/.hg/cache/branchheads-served |
|
286 | $ cat $TESTTMP/b2/.hg/cache/branchheads-served | |
287 | c65502d4178782309ce0574c5ae6ee9485a9bafa 4 |
|
287 | c65502d4178782309ce0574c5ae6ee9485a9bafa 4 | |
288 | 2870ad076e541e714f3c2bc32826b5c6a6e5b040 default |
|
288 | 2870ad076e541e714f3c2bc32826b5c6a6e5b040 default | |
289 | c65502d4178782309ce0574c5ae6ee9485a9bafa default |
|
289 | c65502d4178782309ce0574c5ae6ee9485a9bafa default | |
290 |
|
290 | |||
291 | $ hg heads --template="{rev}:{node} {branch}\n" |
|
291 | $ hg heads --template="{rev}:{node} {branch}\n" | |
292 | 4:c65502d4178782309ce0574c5ae6ee9485a9bafa default |
|
292 | 4:c65502d4178782309ce0574c5ae6ee9485a9bafa default | |
293 | 3:2870ad076e541e714f3c2bc32826b5c6a6e5b040 default |
|
293 | 3:2870ad076e541e714f3c2bc32826b5c6a6e5b040 default | |
294 |
|
294 | |||
295 | $ cd .. |
|
295 | $ cd .. | |
296 |
|
296 | |||
297 |
|
297 | |||
298 |
|
298 | |||
299 |
|
299 | |||
300 |
|
300 | |||
301 |
|
301 | |||
302 | Create repo c: |
|
302 | Create repo c: | |
303 |
|
303 | |||
304 | $ hg init c |
|
304 | $ hg init c | |
305 | $ cd c |
|
305 | $ cd c | |
306 |
|
306 | |||
307 | $ echo A > A |
|
307 | $ echo A > A | |
308 | $ hg ci -Am A |
|
308 | $ hg ci -Am A | |
309 | adding A |
|
309 | adding A | |
310 | $ echo B > B |
|
310 | $ echo B > B | |
311 | $ hg ci -Am B |
|
311 | $ hg ci -Am B | |
312 | adding B |
|
312 | adding B | |
313 |
|
313 | |||
314 | $ hg up -q 0 |
|
314 | $ hg up -q 0 | |
315 |
|
315 | |||
316 | $ echo C > C |
|
316 | $ echo C > C | |
317 | $ hg ci -Am C |
|
317 | $ hg ci -Am C | |
318 | adding C |
|
318 | adding C | |
319 | created new head |
|
319 | created new head | |
320 |
|
320 | |||
321 | $ hg merge |
|
321 | $ hg merge | |
322 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
322 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
323 | (branch merge, don't forget to commit) |
|
323 | (branch merge, don't forget to commit) | |
324 |
|
324 | |||
325 | $ echo D > D |
|
325 | $ echo D > D | |
326 | $ hg ci -Am D |
|
326 | $ hg ci -Am D | |
327 | adding D |
|
327 | adding D | |
328 |
|
328 | |||
329 | $ hg up -q 1 |
|
329 | $ hg up -q 1 | |
330 |
|
330 | |||
331 | $ echo E > E |
|
331 | $ echo E > E | |
332 | $ hg ci -Am E |
|
332 | $ hg ci -Am E | |
333 | adding E |
|
333 | adding E | |
334 | created new head |
|
334 | created new head | |
335 | $ echo F > E |
|
335 | $ echo F > E | |
336 | $ hg ci -m 'F' |
|
336 | $ hg ci -m 'F' | |
337 |
|
337 | |||
338 | $ echo G > G |
|
338 | $ echo G > G | |
339 | $ hg ci -Am G |
|
339 | $ hg ci -Am G | |
340 | adding G |
|
340 | adding G | |
341 |
|
341 | |||
342 | $ hg merge |
|
342 | $ hg merge | |
343 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
343 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
344 | (branch merge, don't forget to commit) |
|
344 | (branch merge, don't forget to commit) | |
345 |
|
345 | |||
346 | $ hg ci -m H |
|
346 | $ hg ci -m H | |
347 |
|
347 | |||
348 | $ hg up -q 0 |
|
348 | $ hg up -q 0 | |
349 |
|
349 | |||
350 | $ echo I > I |
|
350 | $ echo I > I | |
351 | $ hg ci -Am I |
|
351 | $ hg ci -Am I | |
352 | adding I |
|
352 | adding I | |
353 | created new head |
|
353 | created new head | |
354 |
|
354 | |||
355 | $ hg tglog |
|
355 | $ hg tglog | |
356 | @ 8: 'I' |
|
356 | @ 8: 'I' | |
357 | | |
|
357 | | | |
358 | | o 7: 'H' |
|
358 | | o 7: 'H' | |
359 | | |\ |
|
359 | | |\ | |
360 | | | o 6: 'G' |
|
360 | | | o 6: 'G' | |
361 | | | | |
|
361 | | | | | |
362 | | | o 5: 'F' |
|
362 | | | o 5: 'F' | |
363 | | | | |
|
363 | | | | | |
364 | | | o 4: 'E' |
|
364 | | | o 4: 'E' | |
365 | | | | |
|
365 | | | | | |
366 | | o | 3: 'D' |
|
366 | | o | 3: 'D' | |
367 | | |\| |
|
367 | | |\| | |
368 | | o | 2: 'C' |
|
368 | | o | 2: 'C' | |
369 | |/ / |
|
369 | |/ / | |
370 | | o 1: 'B' |
|
370 | | o 1: 'B' | |
371 | |/ |
|
371 | |/ | |
372 | o 0: 'A' |
|
372 | o 0: 'A' | |
373 |
|
373 | |||
374 | $ cd .. |
|
374 | $ cd .. | |
375 |
|
375 | |||
376 |
|
376 | |||
377 | Rebase and collapse - E onto I: |
|
377 | Rebase and collapse - E onto I: | |
378 |
|
378 | |||
379 | $ hg clone -q -u . c c1 |
|
379 | $ hg clone -q -u . c c1 | |
380 | $ cd c1 |
|
380 | $ cd c1 | |
381 |
|
381 | |||
382 | $ hg rebase -s 4 --collapse # root (4) is not a merge |
|
382 | $ hg rebase -s 4 --collapse # root (4) is not a merge | |
383 | merging E |
|
383 | merging E | |
384 | saved backup bundle to $TESTTMP/c1/.hg/strip-backup/*-backup.hg (glob) |
|
384 | saved backup bundle to $TESTTMP/c1/.hg/strip-backup/*-backup.hg (glob) | |
385 |
|
385 | |||
386 | $ hg tglog |
|
386 | $ hg tglog | |
387 | @ 5: 'Collapsed revision |
|
387 | @ 5: 'Collapsed revision | |
388 | |\ * E |
|
388 | |\ * E | |
389 | | | * F |
|
389 | | | * F | |
390 | | | * G |
|
390 | | | * G | |
391 | | | * H' |
|
391 | | | * H' | |
392 | | o 4: 'I' |
|
392 | | o 4: 'I' | |
393 | | | |
|
393 | | | | |
394 | o | 3: 'D' |
|
394 | o | 3: 'D' | |
395 | |\ \ |
|
395 | |\ \ | |
396 | | o | 2: 'C' |
|
396 | | o | 2: 'C' | |
397 | | |/ |
|
397 | | |/ | |
398 | o / 1: 'B' |
|
398 | o / 1: 'B' | |
399 | |/ |
|
399 | |/ | |
400 | o 0: 'A' |
|
400 | o 0: 'A' | |
401 |
|
401 | |||
402 | $ hg manifest |
|
402 | $ hg manifest | |
403 | A |
|
403 | A | |
404 | C |
|
404 | C | |
405 | D |
|
405 | D | |
406 | E |
|
406 | E | |
407 | G |
|
407 | G | |
408 | I |
|
408 | I | |
409 |
|
409 | |||
410 | $ cat E |
|
410 | $ cat E | |
411 | F |
|
411 | F | |
412 |
|
412 | |||
413 | $ cd .. |
|
413 | $ cd .. | |
414 |
|
414 | |||
415 |
|
415 | |||
416 | Create repo d: |
|
416 | Create repo d: | |
417 |
|
417 | |||
418 | $ hg init d |
|
418 | $ hg init d | |
419 | $ cd d |
|
419 | $ cd d | |
420 |
|
420 | |||
421 | $ echo A > A |
|
421 | $ echo A > A | |
422 | $ hg ci -Am A |
|
422 | $ hg ci -Am A | |
423 | adding A |
|
423 | adding A | |
424 | $ echo B > B |
|
424 | $ echo B > B | |
425 | $ hg ci -Am B |
|
425 | $ hg ci -Am B | |
426 | adding B |
|
426 | adding B | |
427 | $ echo C > C |
|
427 | $ echo C > C | |
428 | $ hg ci -Am C |
|
428 | $ hg ci -Am C | |
429 | adding C |
|
429 | adding C | |
430 |
|
430 | |||
431 | $ hg up -q 1 |
|
431 | $ hg up -q 1 | |
432 |
|
432 | |||
433 | $ echo D > D |
|
433 | $ echo D > D | |
434 | $ hg ci -Am D |
|
434 | $ hg ci -Am D | |
435 | adding D |
|
435 | adding D | |
436 | created new head |
|
436 | created new head | |
437 | $ hg merge |
|
437 | $ hg merge | |
438 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
438 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
439 | (branch merge, don't forget to commit) |
|
439 | (branch merge, don't forget to commit) | |
440 |
|
440 | |||
441 | $ hg ci -m E |
|
441 | $ hg ci -m E | |
442 |
|
442 | |||
443 | $ hg up -q 0 |
|
443 | $ hg up -q 0 | |
444 |
|
444 | |||
445 | $ echo F > F |
|
445 | $ echo F > F | |
446 | $ hg ci -Am F |
|
446 | $ hg ci -Am F | |
447 | adding F |
|
447 | adding F | |
448 | created new head |
|
448 | created new head | |
449 |
|
449 | |||
450 | $ hg tglog |
|
450 | $ hg tglog | |
451 | @ 5: 'F' |
|
451 | @ 5: 'F' | |
452 | | |
|
452 | | | |
453 | | o 4: 'E' |
|
453 | | o 4: 'E' | |
454 | | |\ |
|
454 | | |\ | |
455 | | | o 3: 'D' |
|
455 | | | o 3: 'D' | |
456 | | | | |
|
456 | | | | | |
457 | | o | 2: 'C' |
|
457 | | o | 2: 'C' | |
458 | | |/ |
|
458 | | |/ | |
459 | | o 1: 'B' |
|
459 | | o 1: 'B' | |
460 | |/ |
|
460 | |/ | |
461 | o 0: 'A' |
|
461 | o 0: 'A' | |
462 |
|
462 | |||
463 | $ cd .. |
|
463 | $ cd .. | |
464 |
|
464 | |||
465 |
|
465 | |||
466 | Rebase and collapse - B onto F: |
|
466 | Rebase and collapse - B onto F: | |
467 |
|
467 | |||
468 | $ hg clone -q -u . d d1 |
|
468 | $ hg clone -q -u . d d1 | |
469 | $ cd d1 |
|
469 | $ cd d1 | |
470 |
|
470 | |||
471 | $ hg rebase -s 1 --collapse |
|
471 | $ hg rebase -s 1 --collapse | |
472 | saved backup bundle to $TESTTMP/d1/.hg/strip-backup/*-backup.hg (glob) |
|
472 | saved backup bundle to $TESTTMP/d1/.hg/strip-backup/*-backup.hg (glob) | |
473 |
|
473 | |||
474 | $ hg tglog |
|
474 | $ hg tglog | |
475 | @ 2: 'Collapsed revision |
|
475 | @ 2: 'Collapsed revision | |
476 | | * B |
|
476 | | * B | |
477 | | * C |
|
477 | | * C | |
478 | | * D |
|
478 | | * D | |
479 | | * E' |
|
479 | | * E' | |
480 | o 1: 'F' |
|
480 | o 1: 'F' | |
481 | | |
|
481 | | | |
482 | o 0: 'A' |
|
482 | o 0: 'A' | |
483 |
|
483 | |||
484 | $ hg manifest |
|
484 | $ hg manifest | |
485 | A |
|
485 | A | |
486 | B |
|
486 | B | |
487 | C |
|
487 | C | |
488 | D |
|
488 | D | |
489 | F |
|
489 | F | |
490 |
|
490 | |||
491 | Interactions between collapse and keepbranches |
|
491 | Interactions between collapse and keepbranches | |
492 | $ cd .. |
|
492 | $ cd .. | |
493 | $ hg init e |
|
493 | $ hg init e | |
494 | $ cd e |
|
494 | $ cd e | |
495 | $ echo 'a' > a |
|
495 | $ echo 'a' > a | |
496 | $ hg ci -Am 'A' |
|
496 | $ hg ci -Am 'A' | |
497 | adding a |
|
497 | adding a | |
498 |
|
498 | |||
499 | $ hg branch 'one' |
|
499 | $ hg branch 'one' | |
500 | marked working directory as branch one |
|
500 | marked working directory as branch one | |
501 | (branches are permanent and global, did you want a bookmark?) |
|
501 | (branches are permanent and global, did you want a bookmark?) | |
502 | $ echo 'b' > b |
|
502 | $ echo 'b' > b | |
503 | $ hg ci -Am 'B' |
|
503 | $ hg ci -Am 'B' | |
504 | adding b |
|
504 | adding b | |
505 |
|
505 | |||
506 | $ hg branch 'two' |
|
506 | $ hg branch 'two' | |
507 | marked working directory as branch two |
|
507 | marked working directory as branch two | |
508 | (branches are permanent and global, did you want a bookmark?) |
|
508 | (branches are permanent and global, did you want a bookmark?) | |
509 | $ echo 'c' > c |
|
509 | $ echo 'c' > c | |
510 | $ hg ci -Am 'C' |
|
510 | $ hg ci -Am 'C' | |
511 | adding c |
|
511 | adding c | |
512 |
|
512 | |||
513 | $ hg up -q 0 |
|
513 | $ hg up -q 0 | |
514 | $ echo 'd' > d |
|
514 | $ echo 'd' > d | |
515 | $ hg ci -Am 'D' |
|
515 | $ hg ci -Am 'D' | |
516 | adding d |
|
516 | adding d | |
517 |
|
517 | |||
518 | $ hg tglog |
|
518 | $ hg tglog | |
519 | @ 3: 'D' |
|
519 | @ 3: 'D' | |
520 | | |
|
520 | | | |
521 | | o 2: 'C' two |
|
521 | | o 2: 'C' two | |
522 | | | |
|
522 | | | | |
523 | | o 1: 'B' one |
|
523 | | o 1: 'B' one | |
524 | |/ |
|
524 | |/ | |
525 | o 0: 'A' |
|
525 | o 0: 'A' | |
526 |
|
526 | |||
527 | $ hg rebase --keepbranches --collapse -s 1 -d 3 |
|
527 | $ hg rebase --keepbranches --collapse -s 1 -d 3 | |
528 | abort: cannot collapse multiple named branches |
|
528 | abort: cannot collapse multiple named branches | |
529 | [255] |
|
529 | [255] | |
530 |
|
530 | |||
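
The abort is expected: a single collapsed changeset can only carry one branch name, so folding changesets that sit on the named branches 'one' and 'two' while --keepbranches asks for those names to be preserved cannot work. Two ways around it, sketched here but not exercised by this test:

  $ hg rebase --collapse -s 1 -d 3                 # drop --keepbranches; the folded changeset lands on the destination's branch
  $ hg rebase --keepbranches --collapse -s 2 -d 3  # or collapse changesets from a single branch only

These illustrate the rule stated in the abort message; they are not output-verified commands.
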
531 | $ repeatchange() { |
|
531 | $ repeatchange() { | |
532 | > hg checkout $1 |
|
532 | > hg checkout $1 | |
533 | > hg cp d z |
|
533 | > hg cp d z | |
534 | > echo blah >> z |
|
534 | > echo blah >> z | |
535 | > hg commit -Am "$2" --user "$3" |
|
535 | > hg commit -Am "$2" --user "$3" | |
536 | > } |
|
536 | > } | |
537 | $ repeatchange 3 "E" "user1" |
|
537 | $ repeatchange 3 "E" "user1" | |
538 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
538 | 0 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
539 | $ repeatchange 3 "E" "user2" |
|
539 | $ repeatchange 3 "E" "user2" | |
540 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
540 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
541 | created new head |
|
541 | created new head | |
542 | $ hg tglog |
|
542 | $ hg tglog | |
543 | @ 5: 'E' |
|
543 | @ 5: 'E' | |
544 | | |
|
544 | | | |
545 | | o 4: 'E' |
|
545 | | o 4: 'E' | |
546 | |/ |
|
546 | |/ | |
547 | o 3: 'D' |
|
547 | o 3: 'D' | |
548 | | |
|
548 | | | |
549 | | o 2: 'C' two |
|
549 | | o 2: 'C' two | |
550 | | | |
|
550 | | | | |
551 | | o 1: 'B' one |
|
551 | | o 1: 'B' one | |
552 | |/ |
|
552 | |/ | |
553 | o 0: 'A' |
|
553 | o 0: 'A' | |
554 |
|
554 | |||
555 | $ hg rebase -s 5 -d 4 |
|
555 | $ hg rebase -s 5 -d 4 | |
556 | saved backup bundle to $TESTTMP/e/.hg/strip-backup/*-backup.hg (glob) |
|
556 | saved backup bundle to $TESTTMP/e/.hg/strip-backup/*-backup.hg (glob) | |
557 | $ hg tglog |
|
557 | $ hg tglog | |
558 | @ 4: 'E' |
|
558 | @ 4: 'E' | |
559 | | |
|
559 | | | |
560 | o 3: 'D' |
|
560 | o 3: 'D' | |
561 | | |
|
561 | | | |
562 | | o 2: 'C' two |
|
562 | | o 2: 'C' two | |
563 | | | |
|
563 | | | | |
564 | | o 1: 'B' one |
|
564 | | o 1: 'B' one | |
565 | |/ |
|
565 | |/ | |
566 | o 0: 'A' |
|
566 | o 0: 'A' | |
567 |
|
567 | |||
568 | $ hg export tip |
|
568 | $ hg export tip | |
569 | # HG changeset patch |
|
569 | # HG changeset patch | |
570 | # User user1 |
|
570 | # User user1 | |
571 | # Date 0 0 |
|
571 | # Date 0 0 | |
572 | # Thu Jan 01 00:00:00 1970 +0000 |
|
572 | # Thu Jan 01 00:00:00 1970 +0000 | |
573 | # Node ID f338eb3c2c7cc5b5915676a2376ba7ac558c5213 |
|
573 | # Node ID f338eb3c2c7cc5b5915676a2376ba7ac558c5213 | |
574 | # Parent 41acb9dca9eb976e84cd21fcb756b4afa5a35c09 |
|
574 | # Parent 41acb9dca9eb976e84cd21fcb756b4afa5a35c09 | |
575 | E |
|
575 | E | |
576 |
|
576 | |||
577 | diff -r 41acb9dca9eb -r f338eb3c2c7c z |
|
577 | diff -r 41acb9dca9eb -r f338eb3c2c7c z | |
578 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
578 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 | |
579 | +++ b/z Thu Jan 01 00:00:00 1970 +0000 |
|
579 | +++ b/z Thu Jan 01 00:00:00 1970 +0000 | |
580 | @@ -0,0 +1,2 @@ |
|
580 | @@ -0,0 +1,2 @@ | |
581 | +d |
|
581 | +d | |
582 | +blah |
|
582 | +blah | |
583 |
|
583 | |||
584 | $ cd .. |
|
584 | $ cd .. | |
585 |
|
585 | |||
586 | Rebase, collapse and copies |
|
586 | Rebase, collapse and copies | |
587 |
|
587 | |||
588 | $ hg init copies |
|
588 | $ hg init copies | |
589 | $ cd copies |
|
589 | $ cd copies | |
590 | $ hg unbundle "$TESTDIR/bundles/renames.hg" |
|
590 | $ hg unbundle "$TESTDIR/bundles/renames.hg" | |
591 | adding changesets |
|
591 | adding changesets | |
592 | adding manifests |
|
592 | adding manifests | |
593 | adding file changes |
|
593 | adding file changes | |
594 | added 4 changesets with 11 changes to 7 files (+1 heads) |
|
594 | added 4 changesets with 11 changes to 7 files (+1 heads) | |
595 | (run 'hg heads' to see heads, 'hg merge' to merge) |
|
595 | (run 'hg heads' to see heads, 'hg merge' to merge) | |
596 | $ hg up -q tip |
|
596 | $ hg up -q tip | |
597 | $ hg tglog |
|
597 | $ hg tglog | |
598 | @ 3: 'move2' |
|
598 | @ 3: 'move2' | |
599 | | |
|
599 | | | |
600 | o 2: 'move1' |
|
600 | o 2: 'move1' | |
601 | | |
|
601 | | | |
602 | | o 1: 'change' |
|
602 | | o 1: 'change' | |
603 | |/ |
|
603 | |/ | |
604 | o 0: 'add' |
|
604 | o 0: 'add' | |
605 |
|
605 | |||
606 | $ hg rebase --collapse -d 1 |
|
606 | $ hg rebase --collapse -d 1 | |
607 | merging a and d to d |
|
607 | merging a and d to d | |
608 | merging b and e to e |
|
608 | merging b and e to e | |
609 | merging c and f to f |
|
609 | merging c and f to f | |
610 | merging e and g to g |
|
610 | merging e and g to g | |
611 | merging f and c to c |
|
611 | merging f and c to c | |
612 | saved backup bundle to $TESTTMP/copies/.hg/strip-backup/*-backup.hg (glob) |
|
612 | saved backup bundle to $TESTTMP/copies/.hg/strip-backup/*-backup.hg (glob) | |
613 | $ hg st |
|
613 | $ hg st | |
614 | $ hg st --copies --change . |
|
614 | $ hg st --copies --change . | |
615 | A d |
|
615 | A d | |
616 | a |
|
616 | a | |
617 | A g |
|
617 | A g | |
618 | b |
|
618 | b | |
619 | R b |
|
619 | R b | |
620 | $ cat c |
|
620 | $ cat c | |
621 | c |
|
621 | c | |
622 | c |
|
622 | c | |
623 | $ cat d |
|
623 | $ cat d | |
624 | a |
|
624 | a | |
625 | a |
|
625 | a | |
626 | $ cat g |
|
626 | $ cat g | |
627 | b |
|
627 | b | |
628 | b |
|
628 | b | |
629 | $ hg log -r . --template "{file_copies}\n" |
|
629 | $ hg log -r . --template "{file_copies}\n" | |
630 | d (a)g (b) |
|
630 | d (a)g (b) | |
631 |
|
631 | |||
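
Note that rename information survives the fold: the collapsed revision still records d as a copy of a and g as a copy of b, which is exactly what the status and template checks above verify. To inspect copies on any collapsed result, the same two commands can be reused as-is:

  $ hg status --copies --change .
  $ hg log -r . --template "{file_copies}\n"
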
632 | Test collapsing a middle revision in-place |
|
632 | Test collapsing a middle revision in-place | |
633 |
|
633 | |||
634 | $ hg tglog |
|
634 | $ hg tglog | |
635 | @ 2: 'Collapsed revision |
|
635 | @ 2: 'Collapsed revision | |
636 | | * move1 |
|
636 | | * move1 | |
637 | | * move2' |
|
637 | | * move2' | |
638 | o 1: 'change' |
|
638 | o 1: 'change' | |
639 | | |
|
639 | | | |
640 | o 0: 'add' |
|
640 | o 0: 'add' | |
641 |
|
641 | |||
642 | $ hg rebase --collapse -r 1 -d 0 |
|
642 | $ hg rebase --collapse -r 1 -d 0 | |
643 | abort: can't remove original changesets with unrebased descendants |
|
643 | abort: can't remove original changesets with unrebased descendants | |
644 | (use --keep to keep original changesets) |
|
644 | (use --keep to keep original changesets) | |
645 | [255] |
|
645 | [255] | |
646 |
|
646 | |||
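
This abort happens because only revision 1 was selected: revision 2 (the collapsed move1/move2 changeset) descends from it and would be left behind when the original is stripped. As the hint says, --keep avoids the strip; a sketch of that variant (not run here) would be:

  $ hg rebase --collapse -r 1 -d 0 --keep   # rebase a copy and leave the original changesets in place
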
647 | Test collapsing in place |
|
647 | Test collapsing in place | |
648 |
|
648 | |||
649 | $ hg rebase --collapse -b . -d 0 |
|
649 | $ hg rebase --collapse -b . -d 0 | |
650 | saved backup bundle to $TESTTMP/copies/.hg/strip-backup/*-backup.hg (glob) |
|
650 | saved backup bundle to $TESTTMP/copies/.hg/strip-backup/*-backup.hg (glob) | |
651 | $ hg st --change . --copies |
|
651 | $ hg st --change . --copies | |
652 | M a |
|
652 | M a | |
653 | M c |
|
653 | M c | |
654 | A d |
|
654 | A d | |
655 | a |
|
655 | a | |
656 | A g |
|
656 | A g | |
657 | b |
|
657 | b | |
658 | R b |
|
658 | R b | |
659 | $ cat a |
|
659 | $ cat a | |
660 | a |
|
660 | a | |
661 | a |
|
661 | a | |
662 | $ cat c |
|
662 | $ cat c | |
663 | c |
|
663 | c | |
664 | c |
|
664 | c | |
665 | $ cat d |
|
665 | $ cat d | |
666 | a |
|
666 | a | |
667 | a |
|
667 | a | |
668 | $ cat g |
|
668 | $ cat g | |
669 | b |
|
669 | b | |
670 | b |
|
670 | b | |
671 | $ cd .. |
|
671 | $ cd .. | |
672 |
|
672 | |||
673 |
|
673 | |||
674 | Test stripping a revision with another child |
|
674 | Test stripping a revision with another child | |
675 |
|
675 | |||
676 | $ hg init f |
|
676 | $ hg init f | |
677 | $ cd f |
|
677 | $ cd f | |
678 |
|
678 | |||
679 | $ echo A > A |
|
679 | $ echo A > A | |
680 | $ hg ci -Am A |
|
680 | $ hg ci -Am A | |
681 | adding A |
|
681 | adding A | |
682 | $ echo B > B |
|
682 | $ echo B > B | |
683 | $ hg ci -Am B |
|
683 | $ hg ci -Am B | |
684 | adding B |
|
684 | adding B | |
685 |
|
685 | |||
686 | $ hg up -q 0 |
|
686 | $ hg up -q 0 | |
687 |
|
687 | |||
688 | $ echo C > C |
|
688 | $ echo C > C | |
689 | $ hg ci -Am C |
|
689 | $ hg ci -Am C | |
690 | adding C |
|
690 | adding C | |
691 | created new head |
|
691 | created new head | |
692 |
|
692 | |||
693 | $ hg tglog |
|
693 | $ hg tglog | |
694 | @ 2: 'C' |
|
694 | @ 2: 'C' | |
695 | | |
|
695 | | | |
696 | | o 1: 'B' |
|
696 | | o 1: 'B' | |
697 | |/ |
|
697 | |/ | |
698 | o 0: 'A' |
|
698 | o 0: 'A' | |
699 |
|
699 | |||
700 |
|
700 | |||
701 |
|
701 | |||
702 | $ hg heads --template="{rev}:{node} {branch}: {desc}\n" |
|
702 | $ hg heads --template="{rev}:{node} {branch}: {desc}\n" | |
703 | 2:c5cefa58fd557f84b72b87f970135984337acbc5 default: C |
|
703 | 2:c5cefa58fd557f84b72b87f970135984337acbc5 default: C | |
704 | 1:27547f69f25460a52fff66ad004e58da7ad3fb56 default: B |
|
704 | 1:27547f69f25460a52fff66ad004e58da7ad3fb56 default: B | |
705 |
|
705 | |||
706 | $ hg strip 2 |
|
706 | $ hg strip 2 | |
707 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
707 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
708 | saved backup bundle to $TESTTMP/f/.hg/strip-backup/*-backup.hg (glob) |
|
708 | saved backup bundle to $TESTTMP/f/.hg/strip-backup/*-backup.hg (glob) | |
709 |
|
709 | |||
710 | $ hg tglog |
|
710 | $ hg tglog | |
711 | o 1: 'B' |
|
711 | o 1: 'B' | |
712 | | |
|
712 | | | |
713 | @ 0: 'A' |
|
713 | @ 0: 'A' | |
714 |
|
714 | |||
715 |
|
715 | |||
716 |
|
716 | |||
717 | $ hg heads --template="{rev}:{node} {branch}: {desc}\n" |
|
717 | $ hg heads --template="{rev}:{node} {branch}: {desc}\n" | |
718 | 1:27547f69f25460a52fff66ad004e58da7ad3fb56 default: B |
|
718 | 1:27547f69f25460a52fff66ad004e58da7ad3fb56 default: B | |
719 |
|
719 | |||
720 | $ cd .. |
|
720 | $ cd .. | |
721 |
|
721 | |||
|
722 | Test collapsing changes that add then remove a file | |||
722 |
|
723 | |||
|
724 | $ hg init collapseaddremove | |||
|
725 | $ cd collapseaddremove | |||
723 |
|
726 | |||
|
727 | $ touch base | |||
|
728 | $ hg commit -Am base | |||
|
729 | adding base | |||
|
730 | $ touch a | |||
|
731 | $ hg commit -Am a | |||
|
732 | adding a | |||
|
733 | $ hg rm a | |||
|
734 | $ touch b | |||
|
735 | $ hg commit -Am b | |||
|
736 | adding b | |||
|
737 | $ hg rebase -d 0 -r "1::2" --collapse -m collapsed | |||
|
738 | saved backup bundle to $TESTTMP/collapseaddremove/.hg/strip-backup/*-backup.hg (glob) | |||
|
739 | $ hg tglog | |||
|
740 | @ 1: 'collapsed' | |||
|
741 | | | |||
|
742 | o 0: 'base' | |||
724 |
|
743 | |||
|
744 | $ hg manifest | |||
|
745 | b | |||
|
746 | base | |||
|
747 | ||||
|
748 | $ cd .. |
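
The new test above covers the edge case where a file (a) is added and then removed inside the collapsed range: the folded changeset carries no trace of it, so the manifest lists only b and base. A hedged way to double-check the same thing (not part of the test) is to look at the collapsed revision's own changes:

  $ hg status --change tip   # expected to show only 'A b'; 'a' should not appear at all
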
@@ -1,400 +1,398 | |||||
1 | $ cat >> $HGRCPATH <<EOF |
|
1 | $ cat >> $HGRCPATH <<EOF | |
2 | > [extensions] |
|
2 | > [extensions] | |
3 | > graphlog= |
|
3 | > graphlog= | |
4 | > rebase= |
|
4 | > rebase= | |
5 | > |
|
5 | > | |
6 | > [phases] |
|
6 | > [phases] | |
7 | > publish=False |
|
7 | > publish=False | |
8 | > |
|
8 | > | |
9 | > [alias] |
|
9 | > [alias] | |
10 | > tglog = log -G --template "{rev}: '{desc}' {branches}\n" |
|
10 | > tglog = log -G --template "{rev}: '{desc}' {branches}\n" | |
11 | > EOF |
|
11 | > EOF | |
12 |
|
12 | |||
13 |
|
13 | |||
14 | $ hg init a |
|
14 | $ hg init a | |
15 | $ cd a |
|
15 | $ cd a | |
16 | $ hg unbundle "$TESTDIR/bundles/rebase.hg" |
|
16 | $ hg unbundle "$TESTDIR/bundles/rebase.hg" | |
17 | adding changesets |
|
17 | adding changesets | |
18 | adding manifests |
|
18 | adding manifests | |
19 | adding file changes |
|
19 | adding file changes | |
20 | added 8 changesets with 7 changes to 7 files (+2 heads) |
|
20 | added 8 changesets with 7 changes to 7 files (+2 heads) | |
21 | (run 'hg heads' to see heads, 'hg merge' to merge) |
|
21 | (run 'hg heads' to see heads, 'hg merge' to merge) | |
22 | $ hg up tip |
|
22 | $ hg up tip | |
23 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
23 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
24 |
|
24 | |||
25 | $ cd .. |
|
25 | $ cd .. | |
26 |
|
26 | |||
27 |
|
27 | |||
28 | Rebasing D onto H detaching from C: |
|
28 | Rebasing D onto H detaching from C: | |
29 |
|
29 | |||
30 | $ hg clone -q -u . a a1 |
|
30 | $ hg clone -q -u . a a1 | |
31 | $ cd a1 |
|
31 | $ cd a1 | |
32 |
|
32 | |||
33 | $ hg tglog |
|
33 | $ hg tglog | |
34 | @ 7: 'H' |
|
34 | @ 7: 'H' | |
35 | | |
|
35 | | | |
36 | | o 6: 'G' |
|
36 | | o 6: 'G' | |
37 | |/| |
|
37 | |/| | |
38 | o | 5: 'F' |
|
38 | o | 5: 'F' | |
39 | | | |
|
39 | | | | |
40 | | o 4: 'E' |
|
40 | | o 4: 'E' | |
41 | |/ |
|
41 | |/ | |
42 | | o 3: 'D' |
|
42 | | o 3: 'D' | |
43 | | | |
|
43 | | | | |
44 | | o 2: 'C' |
|
44 | | o 2: 'C' | |
45 | | | |
|
45 | | | | |
46 | | o 1: 'B' |
|
46 | | o 1: 'B' | |
47 | |/ |
|
47 | |/ | |
48 | o 0: 'A' |
|
48 | o 0: 'A' | |
49 |
|
49 | |||
50 | $ hg phase --force --secret 3 |
|
50 | $ hg phase --force --secret 3 | |
51 | $ hg rebase -s 3 -d 7 |
|
51 | $ hg rebase -s 3 -d 7 | |
52 | saved backup bundle to $TESTTMP/a1/.hg/strip-backup/*-backup.hg (glob) |
|
52 | saved backup bundle to $TESTTMP/a1/.hg/strip-backup/*-backup.hg (glob) | |
53 |
|
53 | |||
54 | $ hg log -G --template "{rev}:{phase} '{desc}' {branches}\n" |
|
54 | $ hg log -G --template "{rev}:{phase} '{desc}' {branches}\n" | |
55 | @ 7:secret 'D' |
|
55 | @ 7:secret 'D' | |
56 | | |
|
56 | | | |
57 | o 6:draft 'H' |
|
57 | o 6:draft 'H' | |
58 | | |
|
58 | | | |
59 | | o 5:draft 'G' |
|
59 | | o 5:draft 'G' | |
60 | |/| |
|
60 | |/| | |
61 | o | 4:draft 'F' |
|
61 | o | 4:draft 'F' | |
62 | | | |
|
62 | | | | |
63 | | o 3:draft 'E' |
|
63 | | o 3:draft 'E' | |
64 | |/ |
|
64 | |/ | |
65 | | o 2:draft 'C' |
|
65 | | o 2:draft 'C' | |
66 | | | |
|
66 | | | | |
67 | | o 1:draft 'B' |
|
67 | | o 1:draft 'B' | |
68 | |/ |
|
68 | |/ | |
69 | o 0:draft 'A' |
|
69 | o 0:draft 'A' | |
70 |
|
70 | |||
71 | $ hg manifest |
|
71 | $ hg manifest | |
72 | A |
|
72 | A | |
73 | D |
|
73 | D | |
74 | F |
|
74 | F | |
75 | H |
|
75 | H | |
76 |
|
76 | |||
77 | $ cd .. |
|
77 | $ cd .. | |
78 |
|
78 | |||
79 |
|
79 | |||
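
Phases are carried across the rebase: D was forced to secret before the operation, and the log template above shows the rebased D still reported as secret on top of the draft H. A quick way to spot-check the phase of the moved head, sketched here rather than taken from the test, is:

  $ hg phase -r tip    # expected to print something like '7: secret'
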
80 | Rebasing C onto H detaching from B: |
|
80 | Rebasing C onto H detaching from B: | |
81 |
|
81 | |||
82 | $ hg clone -q -u . a a2 |
|
82 | $ hg clone -q -u . a a2 | |
83 | $ cd a2 |
|
83 | $ cd a2 | |
84 |
|
84 | |||
85 | $ hg tglog |
|
85 | $ hg tglog | |
86 | @ 7: 'H' |
|
86 | @ 7: 'H' | |
87 | | |
|
87 | | | |
88 | | o 6: 'G' |
|
88 | | o 6: 'G' | |
89 | |/| |
|
89 | |/| | |
90 | o | 5: 'F' |
|
90 | o | 5: 'F' | |
91 | | | |
|
91 | | | | |
92 | | o 4: 'E' |
|
92 | | o 4: 'E' | |
93 | |/ |
|
93 | |/ | |
94 | | o 3: 'D' |
|
94 | | o 3: 'D' | |
95 | | | |
|
95 | | | | |
96 | | o 2: 'C' |
|
96 | | o 2: 'C' | |
97 | | | |
|
97 | | | | |
98 | | o 1: 'B' |
|
98 | | o 1: 'B' | |
99 | |/ |
|
99 | |/ | |
100 | o 0: 'A' |
|
100 | o 0: 'A' | |
101 |
|
101 | |||
102 | $ hg rebase -s 2 -d 7 |
|
102 | $ hg rebase -s 2 -d 7 | |
103 | saved backup bundle to $TESTTMP/a2/.hg/strip-backup/*-backup.hg (glob) |
|
103 | saved backup bundle to $TESTTMP/a2/.hg/strip-backup/*-backup.hg (glob) | |
104 |
|
104 | |||
105 | $ hg tglog |
|
105 | $ hg tglog | |
106 | @ 7: 'D' |
|
106 | @ 7: 'D' | |
107 | | |
|
107 | | | |
108 | o 6: 'C' |
|
108 | o 6: 'C' | |
109 | | |
|
109 | | | |
110 | o 5: 'H' |
|
110 | o 5: 'H' | |
111 | | |
|
111 | | | |
112 | | o 4: 'G' |
|
112 | | o 4: 'G' | |
113 | |/| |
|
113 | |/| | |
114 | o | 3: 'F' |
|
114 | o | 3: 'F' | |
115 | | | |
|
115 | | | | |
116 | | o 2: 'E' |
|
116 | | o 2: 'E' | |
117 | |/ |
|
117 | |/ | |
118 | | o 1: 'B' |
|
118 | | o 1: 'B' | |
119 | |/ |
|
119 | |/ | |
120 | o 0: 'A' |
|
120 | o 0: 'A' | |
121 |
|
121 | |||
122 | $ hg manifest |
|
122 | $ hg manifest | |
123 | A |
|
123 | A | |
124 | C |
|
124 | C | |
125 | D |
|
125 | D | |
126 | F |
|
126 | F | |
127 | H |
|
127 | H | |
128 |
|
128 | |||
129 | $ cd .. |
|
129 | $ cd .. | |
130 |
|
130 | |||
131 |
|
131 | |||
132 | Rebasing B onto H using detach (same as not using it): |
|
132 | Rebasing B onto H using detach (same as not using it): | |
133 |
|
133 | |||
134 | $ hg clone -q -u . a a3 |
|
134 | $ hg clone -q -u . a a3 | |
135 | $ cd a3 |
|
135 | $ cd a3 | |
136 |
|
136 | |||
137 | $ hg tglog |
|
137 | $ hg tglog | |
138 | @ 7: 'H' |
|
138 | @ 7: 'H' | |
139 | | |
|
139 | | | |
140 | | o 6: 'G' |
|
140 | | o 6: 'G' | |
141 | |/| |
|
141 | |/| | |
142 | o | 5: 'F' |
|
142 | o | 5: 'F' | |
143 | | | |
|
143 | | | | |
144 | | o 4: 'E' |
|
144 | | o 4: 'E' | |
145 | |/ |
|
145 | |/ | |
146 | | o 3: 'D' |
|
146 | | o 3: 'D' | |
147 | | | |
|
147 | | | | |
148 | | o 2: 'C' |
|
148 | | o 2: 'C' | |
149 | | | |
|
149 | | | | |
150 | | o 1: 'B' |
|
150 | | o 1: 'B' | |
151 | |/ |
|
151 | |/ | |
152 | o 0: 'A' |
|
152 | o 0: 'A' | |
153 |
|
153 | |||
154 | $ hg rebase -s 1 -d 7 |
|
154 | $ hg rebase -s 1 -d 7 | |
155 | saved backup bundle to $TESTTMP/a3/.hg/strip-backup/*-backup.hg (glob) |
|
155 | saved backup bundle to $TESTTMP/a3/.hg/strip-backup/*-backup.hg (glob) | |
156 |
|
156 | |||
157 | $ hg tglog |
|
157 | $ hg tglog | |
158 | @ 7: 'D' |
|
158 | @ 7: 'D' | |
159 | | |
|
159 | | | |
160 | o 6: 'C' |
|
160 | o 6: 'C' | |
161 | | |
|
161 | | | |
162 | o 5: 'B' |
|
162 | o 5: 'B' | |
163 | | |
|
163 | | | |
164 | o 4: 'H' |
|
164 | o 4: 'H' | |
165 | | |
|
165 | | | |
166 | | o 3: 'G' |
|
166 | | o 3: 'G' | |
167 | |/| |
|
167 | |/| | |
168 | o | 2: 'F' |
|
168 | o | 2: 'F' | |
169 | | | |
|
169 | | | | |
170 | | o 1: 'E' |
|
170 | | o 1: 'E' | |
171 | |/ |
|
171 | |/ | |
172 | o 0: 'A' |
|
172 | o 0: 'A' | |
173 |
|
173 | |||
174 | $ hg manifest |
|
174 | $ hg manifest | |
175 | A |
|
175 | A | |
176 | B |
|
176 | B | |
177 | C |
|
177 | C | |
178 | D |
|
178 | D | |
179 | F |
|
179 | F | |
180 | H |
|
180 | H | |
181 |
|
181 | |||
182 | $ cd .. |
|
182 | $ cd .. | |
183 |
|
183 | |||
184 |
|
184 | |||
185 | Rebasing C onto H detaching from B and collapsing: |
|
185 | Rebasing C onto H detaching from B and collapsing: | |
186 |
|
186 | |||
187 | $ hg clone -q -u . a a4 |
|
187 | $ hg clone -q -u . a a4 | |
188 | $ cd a4 |
|
188 | $ cd a4 | |
189 | $ hg phase --force --secret 3 |
|
189 | $ hg phase --force --secret 3 | |
190 |
|
190 | |||
191 | $ hg tglog |
|
191 | $ hg tglog | |
192 | @ 7: 'H' |
|
192 | @ 7: 'H' | |
193 | | |
|
193 | | | |
194 | | o 6: 'G' |
|
194 | | o 6: 'G' | |
195 | |/| |
|
195 | |/| | |
196 | o | 5: 'F' |
|
196 | o | 5: 'F' | |
197 | | | |
|
197 | | | | |
198 | | o 4: 'E' |
|
198 | | o 4: 'E' | |
199 | |/ |
|
199 | |/ | |
200 | | o 3: 'D' |
|
200 | | o 3: 'D' | |
201 | | | |
|
201 | | | | |
202 | | o 2: 'C' |
|
202 | | o 2: 'C' | |
203 | | | |
|
203 | | | | |
204 | | o 1: 'B' |
|
204 | | o 1: 'B' | |
205 | |/ |
|
205 | |/ | |
206 | o 0: 'A' |
|
206 | o 0: 'A' | |
207 |
|
207 | |||
208 | $ hg rebase --collapse -s 2 -d 7 |
|
208 | $ hg rebase --collapse -s 2 -d 7 | |
209 | saved backup bundle to $TESTTMP/a4/.hg/strip-backup/*-backup.hg (glob) |
|
209 | saved backup bundle to $TESTTMP/a4/.hg/strip-backup/*-backup.hg (glob) | |
210 |
|
210 | |||
211 | $ hg log -G --template "{rev}:{phase} '{desc}' {branches}\n" |
|
211 | $ hg log -G --template "{rev}:{phase} '{desc}' {branches}\n" | |
212 | @ 6:secret 'Collapsed revision |
|
212 | @ 6:secret 'Collapsed revision | |
213 | | * C |
|
213 | | * C | |
214 | | * D' |
|
214 | | * D' | |
215 | o 5:draft 'H' |
|
215 | o 5:draft 'H' | |
216 | | |
|
216 | | | |
217 | | o 4:draft 'G' |
|
217 | | o 4:draft 'G' | |
218 | |/| |
|
218 | |/| | |
219 | o | 3:draft 'F' |
|
219 | o | 3:draft 'F' | |
220 | | | |
|
220 | | | | |
221 | | o 2:draft 'E' |
|
221 | | o 2:draft 'E' | |
222 | |/ |
|
222 | |/ | |
223 | | o 1:draft 'B' |
|
223 | | o 1:draft 'B' | |
224 | |/ |
|
224 | |/ | |
225 | o 0:draft 'A' |
|
225 | o 0:draft 'A' | |
226 |
|
226 | |||
227 | $ hg manifest |
|
227 | $ hg manifest | |
228 | A |
|
228 | A | |
229 | C |
|
229 | C | |
230 | D |
|
230 | D | |
231 | F |
|
231 | F | |
232 | H |
|
232 | H | |
233 |
|
233 | |||
234 | $ cd .. |
|
234 | $ cd .. | |
235 |
|
235 | |||
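
The phase output above also shows how --collapse combines phases: C was draft and D had been forced to secret, and the folded C+D changeset comes out secret, so the fold appears to keep the most restrictive phase of its inputs rather than promoting anything to a more public phase. A minimal sketch of checking that on the result (not part of the test):

  $ hg phase -r tip    # expected to report the collapsed revision as secret
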
236 | Rebasing across null as ancestor |
|
236 | Rebasing across null as ancestor | |
237 | $ hg clone -q -U a a5 |
|
237 | $ hg clone -q -U a a5 | |
238 |
|
238 | |||
239 | $ cd a5 |
|
239 | $ cd a5 | |
240 |
|
240 | |||
241 | $ echo x > x |
|
241 | $ echo x > x | |
242 |
|
242 | |||
243 | $ hg add x |
|
243 | $ hg add x | |
244 |
|
244 | |||
245 | $ hg ci -m "extra branch" |
|
245 | $ hg ci -m "extra branch" | |
246 | created new head |
|
246 | created new head | |
247 |
|
247 | |||
248 | $ hg tglog |
|
248 | $ hg tglog | |
249 | @ 8: 'extra branch' |
|
249 | @ 8: 'extra branch' | |
250 |
|
250 | |||
251 | o 7: 'H' |
|
251 | o 7: 'H' | |
252 | | |
|
252 | | | |
253 | | o 6: 'G' |
|
253 | | o 6: 'G' | |
254 | |/| |
|
254 | |/| | |
255 | o | 5: 'F' |
|
255 | o | 5: 'F' | |
256 | | | |
|
256 | | | | |
257 | | o 4: 'E' |
|
257 | | o 4: 'E' | |
258 | |/ |
|
258 | |/ | |
259 | | o 3: 'D' |
|
259 | | o 3: 'D' | |
260 | | | |
|
260 | | | | |
261 | | o 2: 'C' |
|
261 | | o 2: 'C' | |
262 | | | |
|
262 | | | | |
263 | | o 1: 'B' |
|
263 | | o 1: 'B' | |
264 | |/ |
|
264 | |/ | |
265 | o 0: 'A' |
|
265 | o 0: 'A' | |
266 |
|
266 | |||
267 | $ hg rebase -s 1 -d tip |
|
267 | $ hg rebase -s 1 -d tip | |
268 | saved backup bundle to $TESTTMP/a5/.hg/strip-backup/*-backup.hg (glob) |
|
268 | saved backup bundle to $TESTTMP/a5/.hg/strip-backup/*-backup.hg (glob) | |
269 |
|
269 | |||
270 | $ hg tglog |
|
270 | $ hg tglog | |
271 | @ 8: 'D' |
|
271 | @ 8: 'D' | |
272 | | |
|
272 | | | |
273 | o 7: 'C' |
|
273 | o 7: 'C' | |
274 | | |
|
274 | | | |
275 | o 6: 'B' |
|
275 | o 6: 'B' | |
276 | | |
|
276 | | | |
277 | o 5: 'extra branch' |
|
277 | o 5: 'extra branch' | |
278 |
|
278 | |||
279 | o 4: 'H' |
|
279 | o 4: 'H' | |
280 | | |
|
280 | | | |
281 | | o 3: 'G' |
|
281 | | o 3: 'G' | |
282 | |/| |
|
282 | |/| | |
283 | o | 2: 'F' |
|
283 | o | 2: 'F' | |
284 | | | |
|
284 | | | | |
285 | | o 1: 'E' |
|
285 | | o 1: 'E' | |
286 | |/ |
|
286 | |/ | |
287 | o 0: 'A' |
|
287 | o 0: 'A' | |
288 |
|
288 | |||
289 |
|
289 | |||
290 | $ hg rebase -d 5 -s 7 |
|
290 | $ hg rebase -d 5 -s 7 | |
291 | saved backup bundle to $TESTTMP/a5/.hg/strip-backup/13547172c9c0-backup.hg (glob) |
|
291 | saved backup bundle to $TESTTMP/a5/.hg/strip-backup/13547172c9c0-backup.hg (glob) | |
292 | $ hg tglog |
|
292 | $ hg tglog | |
293 | @ 8: 'D' |
|
293 | @ 8: 'D' | |
294 | | |
|
294 | | | |
295 | o 7: 'C' |
|
295 | o 7: 'C' | |
296 | | |
|
296 | | | |
297 | | o 6: 'B' |
|
297 | | o 6: 'B' | |
298 | |/ |
|
298 | |/ | |
299 | o 5: 'extra branch' |
|
299 | o 5: 'extra branch' | |
300 |
|
300 | |||
301 | o 4: 'H' |
|
301 | o 4: 'H' | |
302 | | |
|
302 | | | |
303 | | o 3: 'G' |
|
303 | | o 3: 'G' | |
304 | |/| |
|
304 | |/| | |
305 | o | 2: 'F' |
|
305 | o | 2: 'F' | |
306 | | | |
|
306 | | | | |
307 | | o 1: 'E' |
|
307 | | o 1: 'E' | |
308 | |/ |
|
308 | |/ | |
309 | o 0: 'A' |
|
309 | o 0: 'A' | |
310 |
|
310 | |||
311 | $ cd .. |
|
311 | $ cd .. | |
312 |
|
312 | |||
313 | Verify that target is not selected as external rev (issue3085) |
|
313 | Verify that target is not selected as external rev (issue3085) | |
314 |
|
314 | |||
315 | $ hg clone -q -U a a6 |
|
315 | $ hg clone -q -U a a6 | |
316 | $ cd a6 |
|
316 | $ cd a6 | |
317 | $ hg up -q 6 |
|
317 | $ hg up -q 6 | |
318 |
|
318 | |||
319 | $ echo "I" >> E |
|
319 | $ echo "I" >> E | |
320 | $ hg ci -m "I" |
|
320 | $ hg ci -m "I" | |
321 | $ hg merge 7 |
|
321 | $ hg merge 7 | |
322 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
322 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
323 | (branch merge, don't forget to commit) |
|
323 | (branch merge, don't forget to commit) | |
324 | $ hg ci -m "Merge" |
|
324 | $ hg ci -m "Merge" | |
325 | $ echo "J" >> F |
|
325 | $ echo "J" >> F | |
326 | $ hg ci -m "J" |
|
326 | $ hg ci -m "J" | |
327 |
|
327 | |||
328 | $ hg rebase -s 8 -d 7 --collapse --config ui.merge=internal:other |
|
328 | $ hg rebase -s 8 -d 7 --collapse --config ui.merge=internal:other | |
329 | remote changed E which local deleted |
|
|||
330 | use (c)hanged version or leave (d)eleted? c |
|
|||
331 | saved backup bundle to $TESTTMP/a6/.hg/strip-backup/*-backup.hg (glob) |
|
329 | saved backup bundle to $TESTTMP/a6/.hg/strip-backup/*-backup.hg (glob) | |
332 |
|
330 | |||
333 | $ hg tglog |
|
331 | $ hg tglog | |
334 | @ 8: 'Collapsed revision |
|
332 | @ 8: 'Collapsed revision | |
335 | | * I |
|
333 | | * I | |
336 | | * Merge |
|
334 | | * Merge | |
337 | | * J' |
|
335 | | * J' | |
338 | o 7: 'H' |
|
336 | o 7: 'H' | |
339 | | |
|
337 | | | |
340 | | o 6: 'G' |
|
338 | | o 6: 'G' | |
341 | |/| |
|
339 | |/| | |
342 | o | 5: 'F' |
|
340 | o | 5: 'F' | |
343 | | | |
|
341 | | | | |
344 | | o 4: 'E' |
|
342 | | o 4: 'E' | |
345 | |/ |
|
343 | |/ | |
346 | | o 3: 'D' |
|
344 | | o 3: 'D' | |
347 | | | |
|
345 | | | | |
348 | | o 2: 'C' |
|
346 | | o 2: 'C' | |
349 | | | |
|
347 | | | | |
350 | | o 1: 'B' |
|
348 | | o 1: 'B' | |
351 | |/ |
|
349 | |/ | |
352 | o 0: 'A' |
|
350 | o 0: 'A' | |
353 |
|
351 | |||
354 |
|
352 | |||
355 | $ hg parents |
|
353 | $ hg parents | |
356 | changeset: 8:9472f4b1d736 |
|
354 | changeset: 8:9472f4b1d736 | |
357 | tag: tip |
|
355 | tag: tip | |
358 | user: test |
|
356 | user: test | |
359 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
357 | date: Thu Jan 01 00:00:00 1970 +0000 | |
360 | summary: Collapsed revision |
|
358 | summary: Collapsed revision | |
361 |
|
359 | |||
362 |
|
360 | |||
363 | $ cd .. |
|
361 | $ cd .. | |
364 |
|
362 | |||
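
The --config ui.merge=internal:other part of the command above is a one-shot configuration override: it selects the internal:other merge tool for this invocation only, which resolves content conflicts by taking the other (incoming) version instead of prompting. A hedged equivalent spelling, assuming rebase's --tool option maps to the same setting, would be:

  $ hg rebase -s 8 -d 7 --collapse --tool internal:other
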
365 | Ensure --continue restores a correct state (issue3046) and phase: |
|
363 | Ensure --continue restores a correct state (issue3046) and phase: | |
366 | $ hg clone -q a a7 |
|
364 | $ hg clone -q a a7 | |
367 | $ cd a7 |
|
365 | $ cd a7 | |
368 | $ hg up -q 3 |
|
366 | $ hg up -q 3 | |
369 | $ echo 'H2' > H |
|
367 | $ echo 'H2' > H | |
370 | $ hg ci -A -m 'H2' |
|
368 | $ hg ci -A -m 'H2' | |
371 | adding H |
|
369 | adding H | |
372 | $ hg phase --force --secret 8 |
|
370 | $ hg phase --force --secret 8 | |
373 | $ hg rebase -s 8 -d 7 --config ui.merge=internal:fail |
|
371 | $ hg rebase -s 8 -d 7 --config ui.merge=internal:fail | |
374 | merging H |
|
372 | merging H | |
375 | warning: conflicts during merge. |
|
373 | warning: conflicts during merge. | |
376 | merging H incomplete! (edit conflicts, then use 'hg resolve --mark') |
|
374 | merging H incomplete! (edit conflicts, then use 'hg resolve --mark') | |
377 | abort: unresolved conflicts (see hg resolve, then hg rebase --continue) |
|
375 | abort: unresolved conflicts (see hg resolve, then hg rebase --continue) | |
378 | [255] |
|
376 | [255] | |
379 | $ hg resolve --all -t internal:local |
|
377 | $ hg resolve --all -t internal:local | |
380 | $ hg rebase -c |
|
378 | $ hg rebase -c | |
381 | saved backup bundle to $TESTTMP/a7/.hg/strip-backup/6215fafa5447-backup.hg (glob) |
|
379 | saved backup bundle to $TESTTMP/a7/.hg/strip-backup/6215fafa5447-backup.hg (glob) | |
382 | $ hg log -G --template "{rev}:{phase} '{desc}' {branches}\n" |
|
380 | $ hg log -G --template "{rev}:{phase} '{desc}' {branches}\n" | |
383 | @ 7:draft 'H' |
|
381 | @ 7:draft 'H' | |
384 | | |
|
382 | | | |
385 | | o 6:draft 'G' |
|
383 | | o 6:draft 'G' | |
386 | |/| |
|
384 | |/| | |
387 | o | 5:draft 'F' |
|
385 | o | 5:draft 'F' | |
388 | | | |
|
386 | | | | |
389 | | o 4:draft 'E' |
|
387 | | o 4:draft 'E' | |
390 | |/ |
|
388 | |/ | |
391 | | o 3:draft 'D' |
|
389 | | o 3:draft 'D' | |
392 | | | |
|
390 | | | | |
393 | | o 2:draft 'C' |
|
391 | | o 2:draft 'C' | |
394 | | | |
|
392 | | | | |
395 | | o 1:draft 'B' |
|
393 | | o 1:draft 'B' | |
396 | |/ |
|
394 | |/ | |
397 | o 0:draft 'A' |
|
395 | o 0:draft 'A' | |
398 |
|
396 | |||
399 |
|
397 | |||
400 | $ cd .. |
|
398 | $ cd .. |
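
Taken together, the last section documents the conflict workflow when a rebase stops: pick or edit the conflicted files, tell Mercurial they are settled, and resume. A compact sketch of the commands involved (FILE is a placeholder):

  $ hg resolve --all -t internal:local   # as above: keep the local version of every conflicted file
  $ hg resolve --mark FILE               # or mark a hand-edited file as resolved
  $ hg rebase --continue                 # long form of 'hg rebase -c'; 'hg rebase --abort' backs the rebase out instead
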