@@ -1,137 +1,137 @@
|
1 | 1 | # All revsets ever used with revsetbenchmarks.py script |
|
2 | 2 | # |
|
3 | 3 | # The goal of this file is to gather all revsets ever used for benchmarking |
|
4 | 4 | # revset's performance. It should be used to gather revsets that test a |
|
5 | 5 | # specific use case or a specific implementation of revset predicates. |
|
6 | 6 | # If you are working on the smartset implementation itself, check |
|
7 | 7 | # 'base-revsets.txt'. |
|
8 | 8 | # |
|
9 | 9 | # Please update this file with any revsets you use for benchmarking a change so |
|
10 | 10 | # that future contributors can easily find and retest it when doing further |
|
11 | 11 | # modification. Feel free to highlight interesting variants if needed. |
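The header above says what these entries are for, but not how a single entry is exercised, and revsetbenchmarks.py's own interface is not shown in this dump. As a purely hypothetical illustration of what timing one of these revsets involves, the sketch below resolves a revset repeatedly with `hg log -r` and keeps the best wall-clock time; the repository path and run count are made-up placeholders.

```python
# Hypothetical sketch only -- this is not revsetbenchmarks.py.
# Assumes an `hg` binary on PATH and a local Mercurial repository.
import subprocess
import time

def time_revset(repo, revset, runs=3):
    """Resolve `revset` a few times via `hg log -r` and return the best time in seconds."""
    best = float('inf')
    for _ in range(runs):
        start = time.perf_counter()
        subprocess.run(
            ['hg', 'log', '-R', repo, '-r', revset, '-T', '{rev}\n'],
            stdout=subprocess.DEVNULL, check=True)
        best = min(best, time.perf_counter() - start)
    return best

# e.g. the first entry from the changelog-derived section below
print(time_revset('/path/to/repo', '(20000::) - (20000)'))
```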
|
12 | 12 | |
|
13 | 13 | |
|
14 | 14 | ## Revsets in this section were all extracted from the changelog when this file was |
|
15 | 15 | # created. Feel free to dig and improve documentation. |
|
16 | 16 | |
|
17 | 17 | # Used in revision da05fe01170b |
|
18 | 18 | (20000::) - (20000) |
|
19 | 19 | # Used in revision 95af98616aa7 |
|
20 | 20 | parents(20000) |
|
21 | 21 | # Used in revision 186fd06283b4 |
|
22 | 22 | (_intlist('20000\x0020001')) and merge() |
|
23 | 23 | # Used in revision 911f5a6579d1 |
|
24 | 24 | p1(20000) |
|
25 | 25 | p2(10000) |
|
26 | 26 | # Used in revision b6dc3b79bb25 |
|
27 | 27 | 0:: |
|
28 | 28 | # Used in revision faf4f63533ff |
|
29 | 29 | bookmark() |
|
30 | 30 | # Used in revision 22ba2c0825da |
|
31 | 31 | tip~25 |
|
32 | 32 | # Used in revision 0cf46b8298fe |
|
33 | 33 | bisect(range) |
|
34 | 34 | # Used in revision 5b65429721d5 |
|
35 | 35 | divergent() |
|
36 | 36 | # Used in revision 6261b9c549a2 |
|
37 | 37 | file(COPYING) |
|
38 | 38 | # Used in revision 44f471102f3a |
|
39 | 39 | follow(COPYING) |
|
40 | 40 | # Used in revision 8040a44aab1c |
|
41 | 41 | origin(tip) |
|
42 | 42 | # Used in revision bbf4f3dfd700 |
|
43 | 43 | rev(25) |
|
44 | 44 | # Used in revision a428db9ab61d |
|
45 | 45 | p1() |
|
46 | 46 | # Used in revision c1546d7400ef |
|
47 | 47 | min(0::) |
|
48 | 48 | # Used in revision 546fa6576815 |
|
49 | 49 | author(lmoscovicz) or author(mpm) |
|
50 | 50 | author(mpm) or author(lmoscovicz) |
|
51 | 51 | # Used in revision 9bfe68357c01 |
|
52 | 52 | public() and id("d82e2223f132") |
|
53 | 53 | # Used in revision ba89f7b542c9 |
|
54 | 54 | rev(25) |
|
55 | 55 | # Used in revision eb763217152a |
|
56 | 56 | rev(210000) |
|
57 | 57 | # Used in revision 69524a05a7fa |
|
58 | 58 | 10:100 |
|
59 | 59 | parents(10):parents(100) |
|
60 | 60 | # Used in revision 6f1b8b3f12fd |
|
61 | 61 | 100~5 |
|
62 | 62 | parents(100)~5 |
|
63 | 63 | (100~5)~5 |
|
64 | 64 | # Used in revision 7a42e5d4c418 |
|
65 | 65 | children(tip~100) |
|
66 | 66 | # Used in revision 7e8737e6ab08 |
|
67 | 67 | 100^1 |
|
68 | 68 | parents(100)^1 |
|
69 | 69 | (100^1)^1 |
|
70 | 70 | # Used in revision 30e0dcd7c5ff |
|
71 | 71 | matching(100) |
|
72 | 72 | matching(parents(100)) |
|
73 | 73 | # Used in revision aafeaba22826 |
|
74 | 74 | 0|1|2|3|4|5|6|7|8|9 |
|
75 | 75 | # Used in revision 33c7a94d4dd0 |
|
76 | 76 | tip:0 |
|
77 | 77 | # Used in revision 7d369fae098e |
|
78 | 78 | (0:100000) |
|
79 | 79 | # Used in revision b333ca94403d |
|
80 | 80 | 0 + 1 + 2 + ... + 200 |
|
81 | 81 | 0 + 1 + 2 + ... + 1000 |
|
82 | 82 | sort(0 + 1 + 2 + ... + 200) |
|
83 | 83 | sort(0 + 1 + 2 + ... + 1000) |
|
84 | 84 | # Used in revision 7fbef7932af9 |
|
85 | 85 | first(0 + 1 + 2 + ... + 1000) |
|
86 | 86 | # Used in revision ceaf04bb14ff |
|
87 | 87 | 0:1000 |
|
88 | 88 | # Used in revision 262e6ad93885 |
|
89 | 89 | not public() |
|
90 | 90 | (tip~1000::) - public() |
|
91 | 91 | not public() and branch("default") |
|
92 | 92 | # Used in revision 15412bba5a68 |
|
93 | 93 | 0::tip |
|
94 | 94 | |
|
95 | 95 | ## All the revsets in this section have been taken from the former central file |
|
96 | 96 | # for revset benchmarking; they are undocumented for this reason. |
|
97 | 97 | all() |
|
98 | 98 | draft() |
|
99 | 99 | ::tip |
|
100 | 100 | draft() and ::tip |
|
101 | 101 | ::tip and draft() |
|
102 | 102 | author(lmoscovicz) |
|
103 | 103 | author(mpm) |
|
104 | 104 | ::p1(p1(tip)):: |
|
105 | 105 | public() |
|
106 | 106 | :10000 and public() |
|
107 | 107 | :10000 and draft() |
|
108 | 108 | (not public() - obsolete()) |
|
109 | 109 | |
|
110 | 110 | # The one below is used by rebase |
|
111 | 111 | (children(ancestor(tip~5, tip)) and ::(tip~5)):: |
|
112 | 112 | |
|
113 | 113 | # those two `roots(...)` inputs are close to what phase movement uses. |
|
114 | 114 | roots((tip~100::) - (tip~100::tip)) |
|
115 | 115 | roots((0::) - (0::tip)) |
|
116 | 116 | |
|
117 | 117 | # more roots testing |
|
118 | 118 | roots(tip~100:) |
|
119 | 119 | roots(:42) |
|
120 | 120 | roots(not public()) |
|
121 | 121 | roots((0:tip)::) |
|
122 | 122 | roots(0::tip) |
|
123 | 123 | 42:68 and roots(42:tip) |
|
124 | 124 | # Used in revision f140d6207cca |
|
125 | 125 | roots(0:tip) |
|
126 | 126 | # test disjoint set with multiple roots |
|
127 | 127 | roots((:42) + (tip~42:)) |
|
128 | 128 | |
|
129 | 129 | # Testing the behavior of "head()" in various situations |
|
130 | 130 | head() |
|
131 | 131 | head() - public() |
|
132 | 132 | draft() and head() |
|
133 | 133 | head() and author("mpm") |
|
134 | 134 | |
|
135 | 135 | # testing the mutable phases set |
|
136 | 136 | draft() |
|
137 | 137 | secret() |
@@ -1,868 +1,868 @@
|
1 | 1 | " VIM plugin for doing single, multi-patch or diff code reviews {{{ |
|
2 | 2 | " Home: http://www.vim.org/scripts/script.php?script_id=1563 |
|
3 | 3 | |
|
4 | 4 | " Version : 0.2.2 "{{{ |
|
5 | 5 | " Author : Manpreet Singh < junkblocker@yahoo.com > |
|
6 | 6 | " Copyright : 2006-2010 by Manpreet Singh |
|
7 | 7 | " License : This file is placed in the public domain. |
|
8 | 8 | " No warranties express or implied. Use at your own risk. |
|
9 | 9 | " |
|
10 | 10 | " Changelog : |
|
11 | 11 | " |
|
12 | 12 | " 0.2.2 - Security fixes by removing custom tempfile creation |
|
13 | 13 | " - Removed need for DiffReviewCleanup/PatchReviewCleanup |
|
14 | 14 | " - Better command execution error detection and display |
|
15 | 15 | " - Improved diff view and folding by ignoring modelines |
|
16 | 16 | " - Improved tab labels display |
|
17 | 17 | " |
|
18 | 18 | " 0.2.1 - Minor temp directory autodetection logic and cleanup |
|
19 | 19 | " |
|
20 | 20 | " 0.2 - Removed the need for filterdiff by implementing it in pure vim script |
|
21 | 21 | " - Added DiffReview command for reverse (changed repository to |
|
22 | 22 | " pristine state) reviews. |
|
23 | 23 | " (PatchReview does pristine repository to patch review) |
|
24 | 24 | " - DiffReview does automatic detection and generation of diffs for |
|
25 | 25 | " various Source Control systems |
|
26 | 26 | " - Skip load if VIM 7.0 or higher unavailable |
|
27 | 27 | " |
|
28 | 28 | " 0.1 - First released |
|
29 | 29 | "}}} |
|
30 | 30 | |
|
31 | 31 | " Documentation: "{{{ |
|
32 | 32 | " =========================================================================== |
|
33 | 33 | " This plugin allows single or multiple, patch or diff based code reviews to |
|
34 | 34 | " be easily done in VIM. VIM has :diffpatch command to do single file reviews |
|
35 | 35 | " but it a) cannot handle patch files containing multiple patches and b) cannot do |
|
36 | 36 | " automated diff generation for various version control systems. This plugin |
|
37 | 37 | " attempts to provide those functionalities. It opens each changed / added or |
|
38 | 38 | " removed file diff in new tabs. |
|
39 | 39 | " |
|
40 | 40 | " Installing: |
|
41 | 41 | " |
|
42 | 42 | " For a quick start, unzip patchreview.zip into your ~/.vim directory and |
|
43 | 43 | " restart Vim. |
|
44 | 44 | " |
|
45 | 45 | " Details: |
|
46 | 46 | " |
|
47 | 47 | " Requirements: |
|
48 | 48 | " |
|
49 | 49 | " 1) VIM 7.0 or higher built with +diff option. |
|
50 | 50 | " |
|
51 | 51 | " 2) A gnu compatible patch command installed. This is the standard patch |
|
52 | 52 | " command on Linux, Mac OS X, *BSD, Cygwin or /usr/bin/gpatch on newer |
|
53 | 53 | " Solaris. |
|
54 | 54 | " |
|
55 | 55 | " 3) Optional (but recommended for speed) |
|
56 | 56 | " |
|
57 | 57 | " Install patchutils ( http://cyberelk.net/tim/patchutils/ ) for your |
|
58 | 58 | " OS. For windows it is available from Cygwin |
|
59 | 59 | " |
|
60 | 60 | " http://www.cygwin.com |
|
61 | 61 | " |
|
62 | 62 | " or GnuWin32 |
|
63 | 63 | " |
|
64 | 64 | " http://gnuwin32.sourceforge.net/ |
|
65 | 65 | " |
|
66 | 66 | " Install: |
|
67 | 67 | " |
|
68 | 68 | " 1) Extract the zip in your $HOME/.vim or $VIM/vimfiles directory and |
|
69 | 69 | " restart vim. The directory location relevant to your platform can be |
|
70 | 70 | " seen by running :help add-global-plugin in vim. |
|
71 | 71 | " |
|
72 | 72 | " 2) Restart vim. |
|
73 | 73 | " |
|
74 | 74 | " Configuration: |
|
75 | 75 | " |
|
76 | 76 | " Optionally, specify the locations of the filterdiff and patch commands |
|
77 | 77 | " and location of a temporary directory to use in your .vimrc. |
|
78 | 78 | " |
|
79 | 79 | " let g:patchreview_patch = '/path/to/gnu/patch' |
|
80 | 80 | " |
|
81 | 81 | " " If you are using filterdiff |
|
82 | 82 | " let g:patchreview_filterdiff = '/path/to/filterdiff' |
|
83 | 83 | " |
|
84 | 84 | " |
|
85 | 85 | " Usage: |
|
86 | 86 | " |
|
87 | 87 | " Please see :help patchreview or :help diffreview for details. |
|
88 | 88 | " |
|
89 | 89 | ""}}} |
|
90 | 90 | |
|
91 | 91 | " Enabled only during development |
|
92 | 92 | " unlet! g:loaded_patchreview " DEBUG |
|
93 | 93 | " unlet! g:patchreview_patch " DEBUG |
|
94 | 94 | " unlet! g:patchreview_filterdiff " DEBUG |
|
95 | 95 | " let g:patchreview_patch = 'patch' " DEBUG |
|
96 | 96 | |
|
97 | 97 | if v:version < 700 |
|
98 | 98 | finish |
|
99 | 99 | endif |
|
100 | 100 | if ! has('diff') |
|
101 | 101 | call confirm('patchreview.vim plugin needs (G)VIM built with +diff support to work.') |
|
102 | 102 | finish |
|
103 | 103 | endif |
|
104 | 104 | |
|
105 | 105 | " load only once |
|
106 | 106 | if (! exists('g:patchreview_debug') && exists('g:loaded_patchreview')) || &compatible |
|
107 | 107 | finish |
|
108 | 108 | endif |
|
109 | 109 | let g:loaded_patchreview="0.2.2" |
|
110 | 110 | |
|
111 | 111 | let s:msgbufname = '-PatchReviewMessages-' |
|
112 | 112 | |
|
113 | 113 | function! <SID>Debug(str) "{{{ |
|
114 | 114 | if exists('g:patchreview_debug') |
|
115 | 115 | Pecho 'DEBUG: ' . a:str |
|
116 | 116 | endif |
|
117 | 117 | endfunction |
|
118 | 118 | command! -nargs=+ -complete=expression Debug call s:Debug(<args>) |
|
119 | 119 | "}}} |
|
120 | 120 | |
|
121 | 121 | function! <SID>PR_wipeMsgBuf() "{{{ |
|
122 | 122 | let winnum = bufwinnr(s:msgbufname) |
|
123 | 123 | if winnum != -1 " If the window is already open, jump to it |
|
124 | 124 | let cur_winnr = winnr() |
|
125 | 125 | if winnr() != winnum |
|
126 | 126 | exe winnum . 'wincmd w' |
|
127 | 127 | exe 'bw' |
|
128 | 128 | exe cur_winnr . 'wincmd w' |
|
129 | 129 | endif |
|
130 | 130 | endif |
|
131 | 131 | endfunction |
|
132 | 132 | "}}} |
|
133 | 133 | |
|
134 | 134 | function! <SID>Pecho(...) "{{{ |
|
135 | 135 | " Usage: Pecho(msg, [return_to_original_window_flag]) |
|
136 | 136 | " default return_to_original_window_flag = 0 |
|
137 | 137 | " |
|
138 | 138 | let cur_winnr = winnr() |
|
139 | 139 | let winnum = bufwinnr(s:msgbufname) |
|
140 | 140 | if winnum != -1 " If the window is already open, jump to it |
|
141 | 141 | if winnr() != winnum |
|
142 | 142 | exe winnum . 'wincmd w' |
|
143 | 143 | endif |
|
144 | 144 | else |
|
145 | 145 | let bufnum = bufnr(s:msgbufname) |
|
146 | 146 | if bufnum == -1 |
|
147 | 147 | let wcmd = s:msgbufname |
|
148 | 148 | else |
|
149 | 149 | let wcmd = '+buffer' . bufnum |
|
150 | 150 | endif |
|
151 | 151 | exe 'silent! botright 5split ' . wcmd |
|
152 | 152 | endif |
|
153 | 153 | setlocal modifiable |
|
154 | 154 | setlocal buftype=nofile |
|
155 | 155 | setlocal bufhidden=delete |
|
156 | 156 | setlocal noswapfile |
|
157 | 157 | setlocal nowrap |
|
158 | 158 | setlocal nobuflisted |
|
159 | 159 | if a:0 != 0 |
|
160 | 160 | silent! $put =a:1 |
|
161 | 161 | endif |
|
162 | 162 | exe ':$' |
|
163 | 163 | setlocal nomodifiable |
|
164 | 164 | if a:0 > 1 && a:2 |
|
165 | 165 | exe cur_winnr . 'wincmd w' |
|
166 | 166 | endif |
|
167 | 167 | endfunction |
|
168 | 168 | |
|
169 | 169 | command! -nargs=+ -complete=expression Pecho call s:Pecho(<args>) |
|
170 | 170 | "}}} |
|
171 | 171 | |
|
172 | 172 | function! <SID>PR_checkBinary(BinaryName) "{{{ |
|
173 | 173 | " Verify that BinaryName is specified or available |
|
174 | 174 | if ! exists('g:patchreview_' . a:BinaryName) |
|
175 | 175 | if executable(a:BinaryName) |
|
176 | 176 | let g:patchreview_{a:BinaryName} = a:BinaryName |
|
177 | 177 | return 1 |
|
178 | 178 | else |
|
179 | 179 | Pecho 'g:patchreview_' . a:BinaryName . ' is not defined and ' . a:BinaryName . ' command could not be found on path.' |
|
180 | 180 | Pecho 'Please define it in your .vimrc.' |
|
181 | 181 | return 0 |
|
182 | 182 | endif |
|
183 | 183 | elseif ! executable(g:patchreview_{a:BinaryName}) |
|
184 | 184 | Pecho 'Specified g:patchreview_' . a:BinaryName . ' [' . g:patchreview_{a:BinaryName} . '] is not executable.' |
|
185 | 185 | return 0 |
|
186 | 186 | else |
|
187 | 187 | return 1 |
|
188 | 188 | endif |
|
189 | 189 | endfunction |
|
190 | 190 | "}}} |
|
191 | 191 | |
|
192 | 192 | function! <SID>ExtractDiffsNative(...) "{{{ |
|
193 | 193 | " Sets g:patches = {'reason':'', 'patch':[ |
|
194 | 194 | " { |
|
195 | 195 | " 'filename': filepath |
|
196 | 196 | " 'type' : '+' | '-' | '!' |
|
197 | 197 | " 'content' : patch text for this file |
|
198 | 198 | " }, |
|
199 | 199 | " ... |
|
200 | 200 | " ]} |
|
201 | 201 | let g:patches = {'reason' : '', 'patch' : []} |
|
202 | 202 | " TODO : Use pointers into the lines list rather than copying lines into collect |
|
203 | 203 | if a:0 == 0 |
|
204 | 204 | let g:patches['reason'] = "ExtractDiffsNative expects at least a patchfile argument" |
|
205 | 205 | return |
|
206 | 206 | endif |
|
207 | 207 | let patchfile = expand(a:1, ':p') |
|
208 | 208 | if a:0 > 1 |
|
209 | 209 | let patch = a:2 |
|
210 | 210 | endif |
|
211 | 211 | if ! filereadable(patchfile) |
|
212 | 212 | let g:patches['reason'] = "File " . patchfile . " is not readable" |
|
213 | 213 | return |
|
214 | 214 | endif |
|
215 | 215 | unlet! filterdiffcmd |
|
216 | 216 | let filterdiffcmd = '' . g:patchreview_filterdiff . ' --list -s ' . patchfile |
|
217 | 217 | let fileslist = split(system(filterdiffcmd), '[\r\n]') |
|
218 | 218 | for filewithchangetype in fileslist |
|
219 | 219 | if filewithchangetype !~ '^[!+-] ' |
|
220 | 220 | Pecho '*** Skipping review generation due to unknown change for [' . filewithchangetype . ']' |
|
221 | 221 | continue |
|
222 | 222 | endif |
|
223 | 223 | |
|
224 | 224 | unlet! this_patch |
|
225 | 225 | let this_patch = {} |
|
226 | 226 | |
|
227 | 227 | unlet! relpath |
|
228 | 228 | let relpath = substitute(filewithchangetype, '^. ', '', '') |
|
229 | 229 | |
|
230 | 230 | let this_patch['filename'] = relpath |
|
231 | 231 | |
|
232 | 232 | if filewithchangetype =~ '^! ' |
|
233 | 233 | let this_patch['type'] = '!' |
|
234 | 234 | elseif filewithchangetype =~ '^+ ' |
|
235 | 235 | let this_patch['type'] = '+' |
|
236 | 236 | elseif filewithchangetype =~ '^- ' |
|
237 | 237 | let this_patch['type'] = '-' |
|
238 | 238 | endif |
|
239 | 239 | |
|
240 | 240 | unlet! filterdiffcmd |
|
241 | 241 | let filterdiffcmd = '' . g:patchreview_filterdiff . ' -i ' . relpath . ' ' . patchfile |
|
242 | 242 | let this_patch['content'] = split(system(filterdiffcmd), '[\n\r]') |
|
243 | 243 | let g:patches['patch'] += [this_patch] |
|
244 | 244 | Debug "Patch collected for " . relpath |
|
245 | 245 | endfor |
|
246 | 246 | endfunction |
|
247 | 247 | "}}} |
|
248 | 248 | |
|
249 | 249 | function! <SID>ExtractDiffsPureVim(...) "{{{ |
|
250 | 250 | " Sets g:patches = {'reason':'', 'patch':[ |
|
251 | 251 | " { |
|
252 | 252 | " 'filename': filepath |
|
253 | 253 | " 'type' : '+' | '-' | '!' |
|
254 | 254 | " 'content' : patch text for this file |
|
255 | 255 | " }, |
|
256 | 256 | " ... |
|
257 | 257 | " ]} |
|
258 | 258 | let g:patches = {'reason' : '', 'patch' : []} |
|
259 | 259 | " TODO : Use pointers into the lines list rather than copying lines into collect |
|
260 | 260 | if a:0 == 0 |
|
261 | 261 | let g:patches['reason'] = "ExtractDiffsPureVim expects at least a patchfile argument" |
|
262 | 262 | return |
|
263 | 263 | endif |
|
264 | 264 | let patchfile = expand(a:1, ':p') |
|
265 | 265 | if a:0 > 1 |
|
266 | 266 | let patch = a:2 |
|
267 | 267 | endif |
|
268 | 268 | if ! filereadable(patchfile) |
|
269 | 269 | let g:patches['reason'] = "File " . patchfile . " is not readable" |
|
270 | 270 | return |
|
271 | 271 | endif |
|
272 | 272 | call s:PR_wipeMsgBuf() |
|
273 | 273 | let collect = [] |
|
274 | 274 | let linum = 0 |
|
275 | 275 | let lines = readfile(patchfile) |
|
276 | 276 | let linescount = len(lines) |
|
277 | 277 | State 'START' |
|
278 | 278 | while linum < linescount |
|
279 | 279 | let line = lines[linum] |
|
280 | 280 | let linum += 1 |
|
281 | 281 | if State() == 'START' |
|
282 | 282 | let mat = matchlist(line, '^--- \([^\t]\+\).*$') |
|
283 | 283 | if ! empty(mat) && mat[1] != '' |
|
284 | 284 | State 'MAYBE_UNIFIED_DIFF' |
|
285 | 285 | let p_first_file = mat[1] |
|
286 | 286 | let collect = [line] |
|
287 | 287 | Debug line . State() |
|
288 | 288 | continue |
|
289 | 289 | endif |
|
290 | 290 | let mat = matchlist(line, '^\*\*\* \([^\t]\+\).*$') |
|
291 | 291 | if ! empty(mat) && mat[1] != '' |
|
292 | 292 | State 'MAYBE_CONTEXT_DIFF' |
|
293 | 293 | let p_first_file = mat[1] |
|
294 | 294 | let collect = [line] |
|
295 | 295 | Debug line . State() |
|
296 | 296 | continue |
|
297 | 297 | endif |
|
298 | 298 | continue |
|
299 | 299 | elseif State() == 'MAYBE_CONTEXT_DIFF' |
|
300 | 300 | let mat = matchlist(line, '^--- \([^\t]\+\).*$') |
|
301 | 301 | if empty(mat) || mat[1] == '' |
|
302 | 302 | State 'START' |
|
303 | 303 | let linum -= 1 |
|
304 | 304 | continue |
|
305 | 305 | Debug 'Back to square one ' . line() |
|
306 | 306 | endif |
|
307 | 307 | let p_second_file = mat[1] |
|
308 | 308 | if p_first_file == '/dev/null' |
|
309 | 309 | if p_second_file == '/dev/null' |
|
310 | 310 | let g:patches['reason'] = "Malformed diff found at line " . linum |
|
311 | 311 | return |
|
312 | 312 | endif |
|
313 | 313 | let p_type = '+' |
|
314 | 314 | let filepath = p_second_file |
|
315 | 315 | else |
|
316 | 316 | if p_second_file == '/dev/null' |
|
317 | 317 | let p_type = '-' |
|
318 | 318 | let filepath = p_first_file |
|
319 | 319 | else |
|
320 | 320 | let p_type = '!' |
|
321 | 321 | let filepath = p_first_file |
|
322 | 322 | endif |
|
323 | 323 | endif |
|
324 | 324 | State 'EXPECT_15_STARS' |
|
325 | 325 | let collect += [line] |
|
326 | 326 | Debug line . State() |
|
327 | 327 | elseif State() == 'EXPECT_15_STARS' |
|
328 | 328 | if line !~ '^*\{15}$' |
|
329 | 329 | State 'START' |
|
330 | 330 | let linum -= 1 |
|
331 | 331 | Debug line . State() |
|
332 | 332 | continue |
|
333 | 333 | endif |
|
334 | 334 | State 'EXPECT_CONTEXT_CHUNK_HEADER_1' |
|
335 | 335 | let collect += [line] |
|
336 | 336 | Debug line . State() |
|
337 | 337 | elseif State() == 'EXPECT_CONTEXT_CHUNK_HEADER_1' |
|
338 | 338 | let mat = matchlist(line, '^\*\*\* \(\d\+,\)\?\(\d\+\) \*\*\*\*$') |
|
339 | 339 | if empty(mat) || mat[1] == '' |
|
340 | 340 | State 'START' |
|
341 | 341 | let linum -= 1 |
|
342 | 342 | Debug line . State() |
|
343 | 343 | continue |
|
344 | 344 | endif |
|
345 | 345 | let collect += [line] |
|
346 | 346 | State 'SKIP_CONTEXT_STUFF_1' |
|
347 | 347 | Debug line . State() |
|
348 | 348 | continue |
|
349 | 349 | elseif State() == 'SKIP_CONTEXT_STUFF_1' |
|
350 | 350 | if line !~ '^[ !+].*$' |
|
351 | 351 | let mat = matchlist(line, '^--- \(\d\+\),\(\d\+\) ----$') |
|
352 | 352 | if ! empty(mat) && mat[1] != '' && mat[2] != '' |
|
353 | 353 | let goal_count = mat[2] - mat[1] + 1 |
|
354 | 354 | let c_count = 0 |
|
355 | 355 | State 'READ_CONTEXT_CHUNK' |
|
356 | 356 | let collect += [line] |
|
357 | 357 | Debug line . State() . " Goal count set to " . goal_count |
|
358 | 358 | continue |
|
359 | 359 | endif |
|
360 | 360 | State 'START' |
|
361 | 361 | let linum -= 1 |
|
362 | 362 | Debug line . State() |
|
363 | 363 | continue |
|
364 | 364 | endif |
|
365 | 365 | let collect += [line] |
|
366 | 366 | continue |
|
367 | 367 | elseif State() == 'READ_CONTEXT_CHUNK' |
|
368 | 368 | let c_count += 1 |
|
369 | 369 | if c_count == goal_count |
|
370 | 370 | let collect += [line] |
|
371 | 371 | State 'BACKSLASH_OR_CRANGE_EOF' |
|
372 | 372 | continue |
|
373 | 373 | else " goal not met yet |
|
374 | 374 | let mat = matchlist(line, '^\([\\!+ ]\).*$') |
|
375 | 375 | if empty(mat) || mat[1] == '' |
|
376 | 376 | let linum -= 1 |
|
377 | 377 | State 'START' |
|
378 | 378 | Debug line . State() |
|
379 | 379 | continue |
|
380 | 380 | endif |
|
381 | 381 | let collect += [line] |
|
382 | 382 | continue |
|
383 | 383 | endif |
|
384 | 384 | elseif State() == 'BACKSLASH_OR_CRANGE_EOF' |
|
385 | 385 | if line =~ '^\\ No newline.*$' " XXX: Can we go to another chunk from here?? |
|
386 | 386 | let collect += [line] |
|
387 | 387 | let this_patch = {} |
|
388 | 388 | let this_patch['filename'] = filepath |
|
389 | 389 | let this_patch['type'] = p_type |
|
390 | 390 | let this_patch['content'] = collect |
|
391 | 391 | let g:patches['patch'] += [this_patch] |
|
392 | 392 | Debug "Patch collected for " . filepath |
|
393 | 393 | State 'START' |
|
394 | 394 | continue |
|
395 | 395 | endif |
|
396 | 396 | if line =~ '^\*\{15}$' |
|
397 | 397 | let collect += [line] |
|
398 | 398 | State 'EXPECT_CONTEXT_CHUNK_HEADER_1' |
|
399 | 399 | Debug line . State() |
|
400 | 400 | continue |
|
401 | 401 | endif |
|
402 | 402 | let this_patch = {} |
|
403 | 403 | let this_patch['filename'] = filepath |
|
404 | 404 | let this_patch['type'] = p_type |
|
405 | 405 | let this_patch['content'] = collect |
|
406 | 406 | let g:patches['patch'] += [this_patch] |
|
407 | 407 | let linum -= 1 |
|
408 | 408 | State 'START' |
|
409 | 409 | Debug "Patch collected for " . filepath |
|
410 | 410 | Debug line . State() |
|
411 | 411 | continue |
|
412 | 412 | elseif State() == 'MAYBE_UNIFIED_DIFF' |
|
413 | 413 | let mat = matchlist(line, '^+++ \([^\t]\+\).*$') |
|
414 | 414 | if empty(mat) || mat[1] == '' |
|
415 | 415 | State 'START' |
|
416 | 416 | let linum -= 1 |
|
417 | 417 | Debug line . State() |
|
418 | 418 | continue |
|
419 | 419 | endif |
|
420 | 420 | let p_second_file = mat[1] |
|
421 | 421 | if p_first_file == '/dev/null' |
|
422 | 422 | if p_second_file == '/dev/null' |
|
423 | 423 | let g:patches['reason'] = "Malformed diff found at line " . linum |
|
424 | 424 | return |
|
425 | 425 | endif |
|
426 | 426 | let p_type = '+' |
|
427 | 427 | let filepath = p_second_file |
|
428 | 428 | else |
|
429 | 429 | if p_second_file == '/dev/null' |
|
430 | 430 | let p_type = '-' |
|
431 | 431 | let filepath = p_first_file |
|
432 | 432 | else |
|
433 | 433 | let p_type = '!' |
|
434 | 434 | let filepath = p_first_file |
|
435 | 435 | endif |
|
436 | 436 | endif |
|
437 | 437 | State 'EXPECT_UNIFIED_RANGE_CHUNK' |
|
438 | 438 | let collect += [line] |
|
439 | 439 | Debug line . State() |
|
440 | 440 | continue |
|
441 | 441 | elseif State() == 'EXPECT_UNIFIED_RANGE_CHUNK' |
|
442 | 442 | let mat = matchlist(line, '^@@ -\(\d\+,\)\?\(\d\+\) +\(\d\+,\)\?\(\d\+\) @@$') |
|
443 | 443 | if ! empty(mat) |
|
444 | 444 | let old_goal_count = mat[2] |
|
445 | 445 | let new_goal_count = mat[4] |
|
446 | 446 | let o_count = 0 |
|
447 | 447 | let n_count = 0 |
|
448 | 448 | Debug "Goal count set to " . old_goal_count . ', ' . new_goal_count |
|
449 | 449 | State 'READ_UNIFIED_CHUNK' |
|
450 | 450 | let collect += [line] |
|
451 | 451 | Debug line . State() |
|
452 | 452 | continue |
|
453 | 453 | endif |
|
454 | 454 | State 'START' |
|
455 | 455 | Debug line . State() |
|
456 | 456 | continue |
|
457 | 457 | elseif State() == 'READ_UNIFIED_CHUNK' |
|
458 | 458 | if o_count == old_goal_count && n_count == new_goal_count |
|
459 | 459 | if line =~ '^\\.*$' " XXX: Can we go to another chunk from here?? |
|
460 | 460 | let collect += [line] |
|
461 | 461 | let this_patch = {} |
|
462 | 462 | let this_patch['filename'] = filepath |
|
463 | 463 | let this_patch['type'] = p_type |
|
464 | 464 | let this_patch['content'] = collect |
|
465 | 465 | let g:patches['patch'] += [this_patch] |
|
466 | 466 | Debug "Patch collected for " . filepath |
|
467 | 467 | State 'START' |
|
468 | 468 | continue |
|
469 | 469 | endif |
|
470 | 470 | let mat = matchlist(line, '^@@ -\(\d\+,\)\?\(\d\+\) +\(\d\+,\)\?\(\d\+\) @@$') |
|
471 | 471 | if ! empty(mat) |
|
472 | 472 | let old_goal_count = mat[2] |
|
473 | 473 | let new_goal_count = mat[4] |
|
474 | 474 | let o_count = 0 |
|
475 | 475 | let n_count = 0 |
|
476 | 476 | Debug "Goal count set to " . old_goal_count . ', ' . new_goal_count |
|
477 | 477 | let collect += [line] |
|
478 | 478 | Debug line . State() |
|
479 | 479 | continue |
|
480 | 480 | endif |
|
481 | 481 | let this_patch = {} |
|
482 | 482 | let this_patch['filename'] = filepath |
|
483 | 483 | let this_patch['type'] = p_type |
|
484 | 484 | let this_patch['content'] = collect |
|
485 | 485 | let g:patches['patch'] += [this_patch] |
|
486 | 486 | Debug "Patch collected for " . filepath |
|
487 | 487 | let linum -= 1 |
|
488 | 488 | State 'START' |
|
489 | 489 | Debug line . State() |
|
490 | 490 | continue |
|
491 | 491 | else " goal not met yet |
|
492 | 492 | let mat = matchlist(line, '^\([\\+ -]\).*$') |
|
493 | 493 | if empty(mat) || mat[1] == '' |
|
494 | 494 | let linum -= 1 |
|
495 | 495 | State 'START' |
|
496 | 496 | continue |
|
497 | 497 | endif |
|
498 | 498 | let chr = mat[1] |
|
499 | 499 | if chr == '+' |
|
500 | 500 | let n_count += 1 |
|
501 | 501 | endif |
|
502 | 502 | if chr == ' ' |
|
503 | 503 | let o_count += 1 |
|
504 | 504 | let n_count += 1 |
|
505 | 505 | endif |
|
506 | 506 | if chr == '-' |
|
507 | 507 | let o_count += 1 |
|
508 | 508 | endif |
|
509 | 509 | let collect += [line] |
|
510 | 510 | Debug line . State() |
|
511 | 511 | continue |
|
512 | 512 | endif |
|
513 | 513 | else |
|
514 | 514 | let g:patches['reason'] = "Internal error: Do not use the plugin anymore and if possible please send the diff or patch file you tried it with to Manpreet Singh <junkblocker@yahoo.com>" |
|
515 | 515 | return |
|
516 | 516 | endif |
|
517 | 517 | endwhile |
|
518 | 518 | "Pecho State() |
|
519 | 519 | if (State() == 'READ_CONTEXT_CHUNK' && c_count == goal_count) || (State() == 'READ_UNIFIED_CHUNK' && n_count == new_goal_count && o_count == old_goal_count) |
|
520 | 520 | let this_patch = {} |
|
521 | 521 | let this_patch['filename'] = filepath |
|
522 | 522 | let this_patch['type'] = p_type |
|
523 | 523 | let this_patch['content'] = collect |
|
524 | 524 | let g:patches['patch'] += [this_patch] |
|
525 | 525 | Debug "Patch collected for " . filepath |
|
526 | 526 | endif |
|
527 | 527 | return |
|
528 | 528 | endfunction |
|
529 | 529 | "}}} |
|
530 | 530 | |
|
531 | 531 | function! State(...) " For easy manipulation of diff extraction state "{{{ |
|
532 | 532 | if a:0 != 0 |
|
533 | 533 | let s:STATE = a:1 |
|
534 | 534 | else |
|
535 | 535 | if ! exists('s:STATE') |
|
536 | 536 | let s:STATE = 'START' |
|
537 | 537 | endif |
|
538 | 538 | return s:STATE |
|
539 | 539 | endif |
|
540 | 540 | endfunction |
|
541 | 541 | com! -nargs=+ -complete=expression State call State(<args>) |
|
542 | 542 | "}}} |
|
543 | 543 | |
|
544 | 544 | function! <SID>PatchReview(...) "{{{ |
|
545 | 545 | let s:save_shortmess = &shortmess |
|
546 | 546 | let s:save_aw = &autowrite |
|
547 | 547 | let s:save_awa = &autowriteall |
|
548 | 548 | set shortmess=aW |
|
549 | 549 | call s:PR_wipeMsgBuf() |
|
550 | 550 | let s:reviewmode = 'patch' |
|
551 | 551 | call s:_GenericReview(a:000) |
|
552 | 552 | let &autowriteall = s:save_awa |
|
553 | 553 | let &autowrite = s:save_aw |
|
554 | 554 | let &shortmess = s:save_shortmess |
|
555 | 555 | endfunction |
|
556 | 556 | "}}} |
|
557 | 557 | |
|
558 | 558 | function! <SID>_GenericReview(argslist) "{{{ |
|
559 | 559 | " diff mode: |
|
560 | 560 | " arg1 = patchfile |
|
561 | 561 | " arg2 = strip count |
|
562 | 562 | " patch mode: |
|
563 | 563 | " arg1 = patchfile |
|
564 | 564 | " arg2 = strip count |
|
565 | 565 | " arg3 = directory |
|
566 | 566 | |
|
567 | 567 | " VIM 7+ required |
|
568 | 568 | if version < 700 |
|
569 | 569 | Pecho 'This plugin needs VIM 7 or higher' |
|
570 | 570 | return |
|
571 | 571 | endif |
|
572 | 572 | |
|
573 | 573 | " +diff required |
|
574 | 574 | if ! has('diff') |
|
575 | 575 | Pecho 'This plugin needs VIM built with +diff feature.' |
|
576 | 576 | return |
|
577 | 577 | endif |
|
578 | 578 | |
|
579 | 579 | |
|
580 | 580 | if s:reviewmode == 'diff' |
|
581 | 581 | let patch_R_option = ' -t -R ' |
|
582 | 582 | elseif s:reviewmode == 'patch' |
|
583 | 583 | let patch_R_option = '' |
|
584 | 584 | else |
|
585 | 585 | Pecho 'Fatal internal error in patchreview.vim plugin' |
|
586 | 586 | return |
|
587 | 587 | endif |
|
588 | 588 | |
|
589 | 589 | " Check passed arguments |
|
590 | 590 | if len(a:argslist) == 0 |
|
591 | 591 | Pecho 'PatchReview command needs at least one argument specifying a patchfile path.' |
|
592 | 592 | return |
|
593 | 593 | endif |
|
594 | 594 | let StripCount = 0 |
|
595 | 595 | if len(a:argslist) >= 1 && ((s:reviewmode == 'patch' && len(a:argslist) <= 3) || (s:reviewmode == 'diff' && len(a:argslist) == 2)) |
|
596 | 596 | let PatchFilePath = expand(a:argslist[0], ':p') |
|
597 | 597 | if ! filereadable(PatchFilePath) |
|
598 | 598 | Pecho 'File [' . PatchFilePath . '] is not accessible.' |
|
599 | 599 | return |
|
600 | 600 | endif |
|
601 | 601 | if len(a:argslist) >= 2 && s:reviewmode == 'patch' |
|
602 | 602 | let s:SrcDirectory = expand(a:argslist[1], ':p') |
|
603 | 603 | if ! isdirectory(s:SrcDirectory) |
|
604 | 604 | Pecho '[' . s:SrcDirectory . '] is not a directory' |
|
605 | 605 | return |
|
606 | 606 | endif |
|
607 | 607 | try |
|
608 | 608 | " Command line has already escaped the path |
|
609 | 609 | exe 'cd ' . s:SrcDirectory |
|
610 | 610 | catch /^.*E344.*/ |
|
611 | 611 | Pecho 'Could not change to directory [' . s:SrcDirectory . ']' |
|
612 | 612 | return |
|
613 | 613 | endtry |
|
614 | 614 | endif |
|
615 | 615 | if s:reviewmode == 'diff' |
|
616 | 616 | " passed in by default |
|
617 | 617 | let StripCount = eval(a:argslist[1]) |
|
618 | 618 | elseif s:reviewmode == 'patch' |
|
619 | 619 | let StripCount = 1 |
|
620 | 620 | " optional strip count |
|
621 | 621 | if len(a:argslist) == 3 |
|
622 | 622 | let StripCount = eval(a:argslist[2]) |
|
623 | 623 | endif |
|
624 | 624 | endif |
|
625 | 625 | else |
|
626 | 626 | if s:reviewmode == 'patch' |
|
627 | 627 | Pecho 'PatchReview command needs at most three arguments: patchfile path, optional source directory path and optional strip count.' |
|
628 | 628 | elseif s:reviewmode == 'diff' |
|
629 | 629 | Pecho 'DiffReview command accepts no arguments.' |
|
630 | 630 | endif |
|
631 | 631 | return |
|
632 | 632 | endif |
|
633 | 633 | |
|
634 | 634 | " Verify that patch command and temporary directory are available or specified |
|
635 | 635 | if ! s:PR_checkBinary('patch') |
|
636 | 636 | return |
|
637 | 637 | endif |
|
638 | 638 | |
|
639 | 639 | " Requirements met, now execute |
|
640 | 640 | let PatchFilePath = fnamemodify(PatchFilePath, ':p') |
|
641 | 641 | if s:reviewmode == 'patch' |
|
642 | 642 | Pecho 'Patch file : ' . PatchFilePath |
|
643 | 643 | endif |
|
644 | 644 | Pecho 'Source directory: ' . getcwd() |
|
645 | 645 | Pecho '------------------' |
|
646 | 646 | if s:PR_checkBinary('filterdiff') |
|
647 | 647 | Debug "Using filterdiff" |
|
648 | 648 | call s:ExtractDiffsNative(PatchFilePath) |
|
649 | 649 | else |
|
650 | 650 | Debug "Using own diff extraction (slower)" |
|
651 | 651 | call s:ExtractDiffsPureVim(PatchFilePath) |
|
652 | 652 | endif |
|
653 | 653 | for patch in g:patches['patch'] |
|
654 | 654 | if patch.type !~ '^[!+-]$' |
|
655 | 655 | Pecho '*** Skipping review generation due to unknown change [' . patch.type . ']', 1 |
|
656 | 656 | continue |
|
657 | 657 | endif |
|
658 | 658 | unlet! relpath |
|
659 | 659 | let relpath = patch.filename |
|
660 | 660 | " XXX: svn diff and hg diff produce different kinds of output; one requires |
|
661 | 661 | " XXX: stripping but the other doesn't. We need to take care of that |
|
662 | 662 | let stripmore = StripCount |
|
663 | 663 | let StrippedRelativeFilePath = relpath |
|
664 | 664 | while stripmore > 0 |
|
665 | 665 | " strip one |
|
666 | 666 | let StrippedRelativeFilePath = substitute(StrippedRelativeFilePath, '^[^\\\/]\+[^\\\/]*[\\\/]' , '' , '') |
|
667 | 667 | let stripmore -= 1 |
|
668 | 668 | endwhile |
|
669 | 669 | if patch.type == '!' |
|
670 | 670 | if s:reviewmode == 'patch' |
|
671 | 671 | let msgtype = 'Patch modifies file: ' |
|
672 | 672 | elseif s:reviewmode == 'diff' |
|
673 | 673 | let msgtype = 'File has changes: ' |
|
674 | 674 | endif |
|
675 | 675 | elseif patch.type == '+' |
|
676 | 676 | if s:reviewmode == 'patch' |
|
677 | 677 | let msgtype = 'Patch adds file : ' |
|
678 | 678 | elseif s:reviewmode == 'diff' |
|
679 | 679 | let msgtype = 'New file : ' |
|
680 | 680 | endif |
|
681 | 681 | elseif patch.type == '-' |
|
682 | 682 | if s:reviewmode == 'patch' |
|
683 | 683 | let msgtype = 'Patch removes file : ' |
|
684 | 684 | elseif s:reviewmode == 'diff' |
|
685 | 685 | let msgtype = 'Removed file : ' |
|
686 | 686 | endif |
|
687 | 687 | endif |
|
688 | 688 | let bufnum = bufnr(relpath) |
|
689 | 689 | if buflisted(bufnum) && getbufvar(bufnum, '&mod') |
|
690 | 690 | Pecho 'Old buffer for file [' . relpath . '] exists in modified state. Skipping review.', 1 |
|
691 | 691 | continue |
|
692 | 692 | endif |
|
693 | 693 | let tmpname = tempname() |
|
694 | 694 | |
|
695 | 695 | " write patch for patch.filename into tmpname |
|
696 | 696 | call writefile(patch.content, tmpname) |
|
697 | 697 | if patch.type == '+' && s:reviewmode == 'patch' |
|
698 | 698 | let inputfile = '' |
|
699 | 699 | let patchcmd = '!' . g:patchreview_patch . patch_R_option . ' -o "' . tmpname . '.file" "' . inputfile . '" < "' . tmpname . '"' |
|
700 | 700 | elseif patch.type == '+' && s:reviewmode == 'diff' |
|
701 | 701 | let inputfile = '' |
|
702 | 702 | unlet! patchcmd |
|
703 | 703 | else |
|
704 | 704 | let inputfile = expand(StrippedRelativeFilePath, ':p') |
|
705 | 705 | let patchcmd = '!' . g:patchreview_patch . patch_R_option . ' -o "' . tmpname . '.file" "' . inputfile . '" < "' . tmpname . '"' |
|
706 | 706 | endif |
|
707 | 707 | if exists('patchcmd') |
|
708 | 708 | let v:errmsg = '' |
|
709 | 709 | Debug patchcmd |
|
710 | 710 | silent exe patchcmd |
|
711 | 711 | if v:errmsg != '' || v:shell_error |
|
712 | 712 | Pecho 'ERROR: Could not execute patch command.' |
|
713 | 713 | Pecho 'ERROR: ' . patchcmd |
|
714 | 714 | Pecho 'ERROR: ' . v:errmsg |
|
715 | 715 | Pecho 'ERROR: Diff skipped.' |
|
716 | 716 | continue |
|
717 | 717 | endif |
|
718 | 718 | endif |
|
719 | 719 | call delete(tmpname) |
|
720 | 720 | let s:origtabpagenr = tabpagenr() |
|
721 | 721 | silent! exe 'tabedit ' . StrippedRelativeFilePath |
|
722 | 722 | if exists('patchcmd') |
|
723 | 723 | " modelines in loaded files mess with diff comparison |
|
724 | 724 | let s:keep_modeline=&modeline |
|
725 | 725 | let &modeline=0 |
|
726 | 726 | silent! exe 'vert diffsplit ' . tmpname . '.file' |
|
727 | 727 | setlocal buftype=nofile |
|
728 | 728 | setlocal noswapfile |
|
729 | 729 | setlocal syntax=none |
|
730 | 730 | setlocal bufhidden=delete |
|
731 | 731 | setlocal nobuflisted |
|
732 | 732 | setlocal modifiable |
|
733 | 733 | setlocal nowrap |
|
734 | 734 | " Remove buffer name |
|
735 | 735 | silent! 0f |
|
736 | 736 | " Switch to original to get a nice tab title |
|
737 | 737 | silent! wincmd p |
|
738 | 738 | let &modeline=s:keep_modeline |
|
739 | 739 | else |
|
740 | 740 | silent! exe 'vnew' |
|
741 | 741 | endif |
|
742 | 742 | if filereadable(tmpname . '.file.rej') |
|
743 | 743 | silent! exe 'topleft 5split ' . tmpname . '.file.rej' |
|
744 | 744 | Pecho msgtype . '*** REJECTED *** ' . relpath, 1 |
|
745 | 745 | else |
|
746 | 746 | Pecho msgtype . ' ' . relpath, 1 |
|
747 | 747 | endif |
|
748 | 748 | silent! exe 'tabn ' . s:origtabpagenr |
|
749 | 749 | endfor |
|
750 | 750 | Pecho '-----' |
|
751 | 751 | Pecho 'Done.' |
|
752 | 752 | |
|
753 | 753 | endfunction |
|
754 | 754 | "}}} |
|
755 | 755 | |
|
756 | 756 | function! <SID>DiffReview(...) "{{{ |
|
757 | 757 | let s:save_shortmess = &shortmess |
|
758 | 758 | set shortmess=aW |
|
759 | 759 | call s:PR_wipeMsgBuf() |
|
760 | 760 | |
|
761 | 761 | let vcsdict = { |
|
762 | 762 | \'Mercurial' : {'dir' : '.hg', 'binary' : 'hg', 'diffargs' : 'diff' , 'strip' : 1}, |
|
763 | 763 | \'Bazaar-NG' : {'dir' : '.bzr', 'binary' : 'bzr', 'diffargs' : 'diff' , 'strip' : 0}, |
|
764 | 764 | \'monotone' : {'dir' : '_MTN', 'binary' : 'mtn', 'diffargs' : 'diff --unified', 'strip' : 0}, |
|
765 | 765 | \'Subversion' : {'dir' : '.svn', 'binary' : 'svn', 'diffargs' : 'diff' , 'strip' : 0}, |
|
766 | 766 | \'cvs' : {'dir' : 'CVS', 'binary' : 'cvs', 'diffargs' : '-q diff -u' , 'strip' : 0}, |
|
767 | 767 | \} |
|
768 | 768 | |
|
769 | 769 | unlet! s:theDiffCmd |
|
770 | 770 | unlet! l:vcs |
|
771 | 771 | if ! exists('g:patchreview_diffcmd') |
|
772 | 772 | for key in keys(vcsdict) |
|
773 | 773 | if isdirectory(vcsdict[key]['dir']) |
|
774 | 774 | if ! s:PR_checkBinary(vcsdict[key]['binary']) |
|
775 | 775 | Pecho 'Current directory looks like a ' . key . ' repository but ' . vcsdict[key]['binary'] . ' command was not found on path.' |
|
776 | 776 | let &shortmess = s:save_shortmess |
|
777 | 777 | return |
|
778 | 778 | else |
|
779 | 779 | let s:theDiffCmd = vcsdict[key]['binary'] . ' ' . vcsdict[key]['diffargs'] |
|
780 | 780 | let strip = vcsdict[key]['strip'] |
|
781 | 781 | |
|
782 | 782 | Pecho 'Using [' . s:theDiffCmd . '] to generate diffs for this ' . key . ' review.' |
|
783 | 783 | let &shortmess = s:save_shortmess |
|
784 | 784 | let l:vcs = vcsdict[key]['binary'] |
|
785 | 785 | break |
|
786 | 786 | endif |
|
787 | 787 | else |
|
788 | 788 | continue |
|
789 | 789 | endif |
|
790 | 790 | endfor |
|
791 | 791 | else |
|
792 | 792 | let s:theDiffCmd = g:patchreview_diffcmd |
|
793 | 793 | let strip = 0 |
|
794 | 794 | endif |
|
795 | 795 | if ! exists('s:theDiffCmd') |
|
796 | 796 | Pecho 'Please define g:patchreview_diffcmd and make sure you are in a VCS controlled top directory.' |
|
797 | 797 | let &shortmess = s:save_shortmess |
|
798 | 798 | return |
|
799 | 799 | endif |
|
800 | 800 | |
|
801 | 801 | let outfile = tempname() |
|
802 | 802 | let cmd = s:theDiffCmd . ' > "' . outfile . '"' |
|
803 | 803 | let v:errmsg = '' |
|
804 | 804 | let cout = system(cmd) |
|
805 | 805 | if v:errmsg == '' && exists('l:vcs') && l:vcs == 'cvs' && v:shell_error == 1 |
|
806 | 806 | " Ignoring CVS non-error |
|
807 | 807 | elseif v:errmsg != '' || v:shell_error |
|
808 | 808 | Pecho v:errmsg |
|
809 | 809 | Pecho 'Could not execute [' . s:theDiffCmd . ']' |
|
810 | 810 | Pecho 'Error code: ' . v:shell_error |
|
811 | 811 | Pecho cout |
|
812 | 812 | Pecho 'Diff review aborted.' |
|
813 | 813 | let &shortmess = s:save_shortmess |
|
814 | 814 | return |
|
815 | 815 | endif |
|
816 | 816 | let s:reviewmode = 'diff' |
|
817 | 817 | call s:_GenericReview([outfile, strip]) |
|
818 | 818 | let &shortmess = s:save_shortmess |
|
819 | 819 | endfunction |
|
820 | 820 | "}}} |
|
821 | 821 | |
|
822 | 822 | " End user commands "{{{ |
|
823 | 823 | "============================================================================ |
|
824 | 824 | " :PatchReview |
|
825 | 825 | command! -nargs=* -complete=file PatchReview call s:PatchReview (<f-args>) |
|
826 | 826 | |
|
827 | 827 | " :DiffReview |
|
828 | 828 | command! -nargs=0 DiffReview call s:DiffReview() |
|
829 | 829 | "}}} |
|
830 | 830 | |
|
831 | 831 | " Development "{{{ |
|
832 | 832 | if exists('g:patchreview_debug') |
|
833 | 833 | " Tests |
|
834 | 834 | function! <SID>PRExtractTestNative(...) |
|
835 | 835 | "let patchfiles = glob(expand(a:1) . '/?*') |
|
836 | 836 | "for fname in split(patchfiles) |
|
837 | 837 | call s:PR_wipeMsgBuf() |
|
838 | 838 | let fname = a:1 |
|
839 | 839 | call s:ExtractDiffsNative(fname) |
|
840 | 840 | for patch in g:patches['patch'] |
|
841 | 841 | for line in patch.content |
|
842 | 842 | Pecho line |
|
843 | 843 | endfor |
|
844 | 844 | endfor |
|
845 | 845 | "endfor |
|
846 | 846 | endfunction |
|
847 | 847 | |
|
848 | 848 | function! <SID>PRExtractTestVim(...) |
|
849 | 849 | "let patchfiles = glob(expand(a:1) . '/?*') |
|
850 | 850 | "for fname in split(patchfiles) |
|
851 | 851 | call s:PR_wipeMsgBuf() |
|
852 | 852 | let fname = a:1 |
|
853 | 853 | call s:ExtractDiffsPureVim(fname) |
|
854 | 854 | for patch in g:patches['patch'] |
|
855 | 855 | for line in patch.content |
|
856 | 856 | Pecho line |
|
857 | 857 | endfor |
|
858 | 858 | endfor |
|
859 | 859 | "endfor |
|
860 | 860 | endfunction |
|
861 | 861 | |
|
862 | 862 | command! -nargs=+ -complete=file PRTestVim call s:PRExtractTestVim(<f-args>) |
|
863 | 863 | command! -nargs=+ -complete=file PRTestNative call s:PRExtractTestNative(<f-args>) |
|
864 | 864 | endif |
|
865 | 865 | "}}} |
|
866 | 866 | |
|
867 | 867 | " modeline |
|
868 | 868 | " vim: set et fdl=0 fdm=marker fenc=latin ff=unix ft=vim sw=2 sts=0 ts=2 textwidth=78 nowrap : |
@@ -1,643 +1,643 @@
|
1 | 1 | # chgserver.py - command server extension for cHg |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2011 Yuya Nishihara <yuya@tcha.org> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """command server extension for cHg (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | 'S' channel (read/write) |
|
11 | 11 | propagate ui.system() request to client |
|
12 | 12 | |
|
13 | 13 | 'attachio' command |
|
14 | 14 | attach client's stdio passed by sendmsg() |
|
15 | 15 | |
|
16 | 16 | 'chdir' command |
|
17 | 17 | change current directory |
|
18 | 18 | |
|
19 | 19 | 'getpager' command |
|
20 | 20 | checks if pager is enabled and which pager should be executed |
|
21 | 21 | |
|
22 | 22 | 'setenv' command |
|
23 | 23 | replace os.environ completely |
|
24 | 24 | |
|
25 | 25 | 'setumask' command |
|
26 | 26 | set umask |
|
27 | 27 | |
|
28 | 28 | 'validate' command |
|
29 | 29 | reload the config and check if the server is up to date |
|
30 | 30 | |
|
31 | 31 | Config |
|
32 | 32 | ------ |
|
33 | 33 | |
|
34 | 34 | :: |
|
35 | 35 | |
|
36 | 36 | [chgserver] |
|
37 | 37 | idletimeout = 3600 # seconds, after which an idle server will exit |
|
38 | 38 | skiphash = False # whether to skip config or env change checks |
|
39 | 39 | """ |
|
40 | 40 | |
|
41 | 41 | from __future__ import absolute_import |
|
42 | 42 | |
|
43 | 43 | import errno |
|
44 | 44 | import hashlib |
|
45 | 45 | import inspect |
|
46 | 46 | import os |
|
47 | 47 | import re |
|
48 | 48 | import signal |
|
49 | 49 | import struct |
|
50 | 50 | import sys |
|
51 | 51 | import time |
|
52 | 52 | |
|
53 | 53 | from mercurial.i18n import _ |
|
54 | 54 | |
|
55 | 55 | from mercurial import ( |
|
56 | 56 | cmdutil, |
|
57 | 57 | commands, |
|
58 | 58 | commandserver, |
|
59 | 59 | dispatch, |
|
60 | 60 | error, |
|
61 | 61 | extensions, |
|
62 | 62 | osutil, |
|
63 | 63 | util, |
|
64 | 64 | ) |
|
65 | 65 | |
|
66 | 66 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
67 | 67 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
68 | 68 | # be specifying the version(s) of Mercurial they are tested with, or |
|
69 | 69 | # leave the attribute unspecified. |
|
70 | 70 | testedwith = 'ships-with-hg-core' |
|
71 | 71 | |
|
72 | 72 | _log = commandserver.log |
|
73 | 73 | |
|
74 | 74 | def _hashlist(items): |
|
75 | 75 | """return sha1 hexdigest for a list""" |
|
76 | 76 | return hashlib.sha1(str(items)).hexdigest() |
|
77 | 77 | |
|
78 | 78 | # sensitive config sections affecting confighash |
|
79 | 79 | _configsections = [ |
|
80 | 80 | 'alias', # affects global state commands.table |
|
81 | 81 | 'extdiff', # uisetup will register new commands |
|
82 | 82 | 'extensions', |
|
83 | 83 | ] |
|
84 | 84 | |
|
85 | 85 | # sensitive environment variables affecting confighash |
|
86 | 86 | _envre = re.compile(r'''\A(?: |
|
87 | 87 | CHGHG |
|
88 | 88 | |HG.* |
|
89 | 89 | |LANG(?:UAGE)? |
|
90 | 90 | |LC_.* |
|
91 | 91 | |LD_.* |
|
92 | 92 | |PATH |
|
93 | 93 | |PYTHON.* |
|
94 | 94 | |TERM(?:INFO)? |
|
95 | 95 | |TZ |
|
96 | 96 | )\Z''', re.X) |
|
97 | 97 | |
|
98 | 98 | def _confighash(ui): |
|
99 | 99 | """return a quick hash for detecting config/env changes |
|
100 | 100 | |
|
101 | 101 | confighash is the hash of sensitive config items and environment variables. |
|
102 | 102 | |
|
103 | 103 | for chgserver, it is designed that once confighash changes, the server is |
|
104 | 104 | not qualified to serve its client and should redirect the client to a new |
|
105 | 105 | server. unlike mtimehash, a confighash change will not mark the |
|
106 | 106 | server outdated and exit since the user can have different configs at the |
|
107 | 107 | same time. |
|
108 | 108 | """ |
|
109 | 109 | sectionitems = [] |
|
110 | 110 | for section in _configsections: |
|
111 | 111 | sectionitems.append(ui.configitems(section)) |
|
112 | 112 | sectionhash = _hashlist(sectionitems) |
|
113 | 113 | envitems = [(k, v) for k, v in os.environ.iteritems() if _envre.match(k)] |
|
114 | 114 | envhash = _hashlist(sorted(envitems)) |
|
115 | 115 | return sectionhash[:6] + envhash[:6] |
|
116 | 116 | |
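To make the docstring's redirect-versus-outdated distinction concrete, here is a small sketch of the policy it describes (an illustration only, not the extension's actual validate logic): a confighash mismatch means the client should be sent to a server matching the new config, while an mtimehash mismatch (see _mtimehash below) means the running server is stale.

```python
# Illustrative policy sketch; hashstate-like objects are assumed to carry
# .confighash and .mtimehash as computed by the helpers in this file.
def compare_hashes(server_state, current_state):
    if server_state.confighash != current_state.confighash:
        # sensitive config/env changed: this server stays valid for the old
        # config, but this client should be redirected to a matching server
        return 'redirect'
    if server_state.mtimehash != current_state.mtimehash:
        # code on disk changed: the server is outdated and should stop serving
        return 'outdated'
    return 'up-to-date'
```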
|
117 | 117 | def _getmtimepaths(ui): |
|
118 | 118 | """get a list of paths that should be checked to detect change |
|
119 | 119 | |
|
120 | 120 | The list will include: |
|
121 | 121 | - extensions (will not cover all files for complex extensions) |
|
122 | 122 | - mercurial/__version__.py |
|
123 | 123 | - python binary |
|
124 | 124 | """ |
|
125 | 125 | modules = [m for n, m in extensions.extensions(ui)] |
|
126 | 126 | try: |
|
127 | 127 | from mercurial import __version__ |
|
128 | 128 | modules.append(__version__) |
|
129 | 129 | except ImportError: |
|
130 | 130 | pass |
|
131 | 131 | files = [sys.executable] |
|
132 | 132 | for m in modules: |
|
133 | 133 | try: |
|
134 | 134 | files.append(inspect.getabsfile(m)) |
|
135 | 135 | except TypeError: |
|
136 | 136 | pass |
|
137 | 137 | return sorted(set(files)) |
|
138 | 138 | |
|
139 | 139 | def _mtimehash(paths): |
|
140 | 140 | """return a quick hash for detecting file changes |
|
141 | 141 | |
|
142 | 142 | mtimehash calls stat on given paths and calculates a hash based on size and |
|
143 | 143 | mtime of each file. mtimehash does not read file content because reading is |
|
144 | 144 | expensive. therefore it's not 100% reliable for detecting content changes. |
|
145 | 145 | it's possible to return different hashes for the same file contents. |
|
146 | 146 | it's also possible to return the same hash for different file contents in |
|
147 | 147 | some carefully crafted situations. |
|
148 | 148 | |
|
149 | 149 | for chgserver, it is designed that once mtimehash changes, the server is |
|
150 | 150 | considered outdated immediately and should no longer provide service. |
|
151 | 151 | |
|
152 | 152 | mtimehash is not included in confighash because we only know the paths of |
|
153 | 153 | extensions after importing them (there is imp.find_module but that faces |
|
154 | 154 | race conditions). We need to calculate confighash without importing. |
|
155 | 155 | """ |
|
156 | 156 | def trystat(path): |
|
157 | 157 | try: |
|
158 | 158 | st = os.stat(path) |
|
159 | 159 | return (st.st_mtime, st.st_size) |
|
160 | 160 | except OSError: |
|
161 | 161 | # could be ENOENT, EPERM etc. not fatal in any case |
|
162 | 162 | pass |
|
163 | 163 | return _hashlist(map(trystat, paths))[:12] |
|
164 | 164 | |
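As a standalone illustration of the scheme above (mirroring _hashlist and _mtimehash outside the extension), the sketch below hashes (mtime, size) pairs so that touching or rewriting any watched file changes the 12-character digest; the paths are placeholders.

```python
# Standalone sketch of the mtime/size hashing used above; paths are placeholders.
import hashlib
import os

def hashlist(items):
    # same idea as _hashlist: sha1 over the string form of the whole list
    return hashlib.sha1(str(items).encode('utf-8')).hexdigest()

def mtimehash(paths):
    def trystat(path):
        try:
            st = os.stat(path)
            return (st.st_mtime, st.st_size)
        except OSError:
            return None  # missing or unreadable files simply contribute None
    return hashlist([trystat(p) for p in paths])[:12]

print(mtimehash(['/usr/bin/python3', '/etc/hosts']))
```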
|
165 | 165 | class hashstate(object): |
|
166 | 166 | """a structure storing confighash, mtimehash, paths used for mtimehash""" |
|
167 | 167 | def __init__(self, confighash, mtimehash, mtimepaths): |
|
168 | 168 | self.confighash = confighash |
|
169 | 169 | self.mtimehash = mtimehash |
|
170 | 170 | self.mtimepaths = mtimepaths |
|
171 | 171 | |
|
172 | 172 | @staticmethod |
|
173 | 173 | def fromui(ui, mtimepaths=None): |
|
174 | 174 | if mtimepaths is None: |
|
175 | 175 | mtimepaths = _getmtimepaths(ui) |
|
176 | 176 | confighash = _confighash(ui) |
|
177 | 177 | mtimehash = _mtimehash(mtimepaths) |
|
178 | 178 | _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash)) |
|
179 | 179 | return hashstate(confighash, mtimehash, mtimepaths) |
|
180 | 180 | |
|
181 | 181 | # copied from hgext/pager.py:uisetup() |
|
182 | 182 | def _setuppagercmd(ui, options, cmd): |
|
183 | 183 | if not ui.formatted(): |
|
184 | 184 | return |
|
185 | 185 | |
|
186 | 186 | p = ui.config("pager", "pager", os.environ.get("PAGER")) |
|
187 | 187 | usepager = False |
|
188 | 188 | always = util.parsebool(options['pager']) |
|
189 | 189 | auto = options['pager'] == 'auto' |
|
190 | 190 | |
|
191 | 191 | if not p: |
|
192 | 192 | pass |
|
193 | 193 | elif always: |
|
194 | 194 | usepager = True |
|
195 | 195 | elif not auto: |
|
196 | 196 | usepager = False |
|
197 | 197 | else: |
|
198 | 198 | attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff'] |
|
199 | 199 | attend = ui.configlist('pager', 'attend', attended) |
|
200 | 200 | ignore = ui.configlist('pager', 'ignore') |
|
201 | 201 | cmds, _ = cmdutil.findcmd(cmd, commands.table) |
|
202 | 202 | |
|
203 | 203 | for cmd in cmds: |
|
204 | 204 | var = 'attend-%s' % cmd |
|
205 | 205 | if ui.config('pager', var): |
|
206 | 206 | usepager = ui.configbool('pager', var) |
|
207 | 207 | break |
|
208 | 208 | if (cmd in attend or |
|
209 | 209 | (cmd not in ignore and not attend)): |
|
210 | 210 | usepager = True |
|
211 | 211 | break |
|
212 | 212 | |
|
213 | 213 | if usepager: |
|
214 | 214 | ui.setconfig('ui', 'formatted', ui.formatted(), 'pager') |
|
215 | 215 | ui.setconfig('ui', 'interactive', False, 'pager') |
|
216 | 216 | return p |
|
217 | 217 | |
|
218 | 218 | def _newchgui(srcui, csystem): |
|
219 | 219 | class chgui(srcui.__class__): |
|
220 | 220 | def __init__(self, src=None): |
|
221 | 221 | super(chgui, self).__init__(src) |
|
222 | 222 | if src: |
|
223 | 223 | self._csystem = getattr(src, '_csystem', csystem) |
|
224 | 224 | else: |
|
225 | 225 | self._csystem = csystem |
|
226 | 226 | |
|
227 | 227 | def system(self, cmd, environ=None, cwd=None, onerr=None, |
|
228 | 228 | errprefix=None): |
|
229 | 229 | # fallback to the original system method if the output needs to be |
|
230 | 230 | # captured (to self._buffers), or the output stream is not stdout |
|
231 | 231 | # (e.g. stderr, cStringIO), because the chg client is not aware of |
|
232 | 232 | # these situations and will behave differently (write to stdout). |
|
233 | 233 | if (any(s[1] for s in self._bufferstates) |
|
234 | 234 | or not util.safehasattr(self.fout, 'fileno') |
|
235 | 235 | or self.fout.fileno() != sys.stdout.fileno()): |
|
236 | 236 | return super(chgui, self).system(cmd, environ, cwd, onerr, |
|
237 | 237 | errprefix) |
|
238 | 238 | # copied from mercurial/util.py:system() |
|
239 | 239 | self.flush() |
|
240 | 240 | def py2shell(val): |
|
241 | 241 | if val is None or val is False: |
|
242 | 242 | return '0' |
|
243 | 243 | if val is True: |
|
244 | 244 | return '1' |
|
245 | 245 | return str(val) |
|
246 | 246 | env = os.environ.copy() |
|
247 | 247 | if environ: |
|
248 | 248 | env.update((k, py2shell(v)) for k, v in environ.iteritems()) |
|
249 | 249 | env['HG'] = util.hgexecutable() |
|
250 | 250 | rc = self._csystem(cmd, env, cwd) |
|
251 | 251 | if rc and onerr: |
|
252 | 252 | errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]), |
|
253 | 253 | util.explainexit(rc)[0]) |
|
254 | 254 | if errprefix: |
|
255 | 255 | errmsg = '%s: %s' % (errprefix, errmsg) |
|
256 | 256 | raise onerr(errmsg) |
|
257 | 257 | return rc |
|
258 | 258 | |
|
259 | 259 | return chgui(srcui) |
|
260 | 260 | |
|
261 | 261 | def _loadnewui(srcui, args): |
|
262 | 262 | newui = srcui.__class__() |
|
263 | 263 | for a in ['fin', 'fout', 'ferr', 'environ']: |
|
264 | 264 | setattr(newui, a, getattr(srcui, a)) |
|
265 | 265 | if util.safehasattr(srcui, '_csystem'): |
|
266 | 266 | newui._csystem = srcui._csystem |
|
267 | 267 | |
|
268 | 268 | # internal config: extensions.chgserver |
|
269 | 269 | newui.setconfig('extensions', 'chgserver', |
|
270 | 270 | srcui.config('extensions', 'chgserver'), '--config') |
|
271 | 271 | |
|
272 | 272 | # command line args |
|
273 | 273 | args = args[:] |
|
274 | 274 | dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args)) |
|
275 | 275 | |
|
276 | 276 | # stolen from tortoisehg.util.copydynamicconfig() |
|
277 | 277 | for section, name, value in srcui.walkconfig(): |
|
278 | 278 | source = srcui.configsource(section, name) |
|
279 | 279 | if ':' in source or source == '--config': |
|
280 | 280 | # path:line or command line |
|
281 | 281 | continue |
|
282 | 282 | if source == 'none': |
|
283 | 283 | # ui.configsource returns 'none' by default |
|
284 | 284 | source = '' |
|
285 | 285 | newui.setconfig(section, name, value, source) |
|
286 | 286 | |
|
287 | 287 | # load wd and repo config, copied from dispatch.py |
|
288 | 288 | cwds = dispatch._earlygetopt(['--cwd'], args) |
|
289 | 289 | cwd = cwds and os.path.realpath(cwds[-1]) or None |
|
290 | 290 | rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args) |
|
291 | 291 | path, newlui = dispatch._getlocal(newui, rpath, wd=cwd) |
|
292 | 292 | |
|
293 | 293 | return (newui, newlui) |
|
294 | 294 | |
|
295 | 295 | class channeledsystem(object): |
|
296 | 296 | """Propagate ui.system() request in the following format: |
|
297 | 297 | |
|
298 | 298 | payload length (unsigned int), |
|
299 | 299 | cmd, '\0', |
|
300 | 300 | cwd, '\0', |
|
301 | 301 | envkey, '=', val, '\0', |
|
302 | 302 | ... |
|
303 | 303 | envkey, '=', val |
|
304 | 304 | |
|
305 | 305 | and waits: |
|
306 | 306 | |
|
307 | 307 | exitcode length (unsigned int), |
|
308 | 308 | exitcode (int) |
|
309 | 309 | """ |
|
310 | 310 | def __init__(self, in_, out, channel): |
|
311 | 311 | self.in_ = in_ |
|
312 | 312 | self.out = out |
|
313 | 313 | self.channel = channel |
|
314 | 314 | |
|
315 | 315 | def __call__(self, cmd, environ, cwd): |
|
316 | 316 | args = [util.quotecommand(cmd), os.path.abspath(cwd or '.')] |
|
317 | 317 | args.extend('%s=%s' % (k, v) for k, v in environ.iteritems()) |
|
318 | 318 | data = '\0'.join(args) |
|
319 | 319 | self.out.write(struct.pack('>cI', self.channel, len(data))) |
|
320 | 320 | self.out.write(data) |
|
321 | 321 | self.out.flush() |
|
322 | 322 | |
|
323 | 323 | length = self.in_.read(4) |
|
324 | 324 | length, = struct.unpack('>I', length) |
|
325 | 325 | if length != 4: |
|
326 | 326 | raise error.Abort(_('invalid response')) |
|
327 | 327 | rc, = struct.unpack('>i', self.in_.read(4)) |
|
328 | 328 | return rc |
|
329 | 329 | |
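The class docstring above pins down the wire format for propagated ui.system() requests. The sketch below shows the client side of that framing (illustrative only; the real chg client is not part of this file): unpack the '>cI' header, split the '\0'-separated payload into command, working directory and environment, then pack the length-prefixed exit-code reply that the server reads back.

```python
# Client-side sketch of the 'S' channel framing documented above (illustrative).
import struct

def read_system_request(header, payload):
    """header is the 5-byte '>cI' frame; payload is the `length` bytes that follow."""
    channel, length = struct.unpack('>cI', header)
    assert channel == b'S' and length == len(payload)
    parts = payload.decode().split('\0')
    cmd, cwd = parts[0], parts[1]
    env = dict(item.split('=', 1) for item in parts[2:])
    return cmd, cwd, env

def pack_exitcode(rc):
    # reply: unsigned length (always 4) followed by the signed exit code
    return struct.pack('>I', 4) + struct.pack('>i', rc)

payload = b'ls -l\x00/tmp\x00LANG=C'
frame = struct.pack('>cI', b'S', len(payload))
print(read_system_request(frame, payload))   # ('ls -l', '/tmp', {'LANG': 'C'})
print(pack_exitcode(0))
```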
|
330 | 330 | _iochannels = [ |
|
331 | 331 | # server.ch, ui.fp, mode |
|
332 | 332 | ('cin', 'fin', 'rb'), |
|
333 | 333 | ('cout', 'fout', 'wb'), |
|
334 | 334 | ('cerr', 'ferr', 'wb'), |
|
335 | 335 | ] |
|
336 | 336 | |
|
337 | 337 | class chgcmdserver(commandserver.server): |
|
338 | 338 | def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress): |
|
339 | 339 | super(chgcmdserver, self).__init__( |
|
340 | 340 | _newchgui(ui, channeledsystem(fin, fout, 'S')), repo, fin, fout) |
|
341 | 341 | self.clientsock = sock |
|
342 | 342 | self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio" |
|
343 | 343 | self.hashstate = hashstate |
|
344 | 344 | self.baseaddress = baseaddress |
|
345 | 345 | if hashstate is not None: |
|
346 | 346 | self.capabilities = self.capabilities.copy() |
|
347 | 347 | self.capabilities['validate'] = chgcmdserver.validate |
|
348 | 348 | |
|
349 | 349 | def cleanup(self): |
|
350 | 350 | super(chgcmdserver, self).cleanup() |
|
351 | 351 | # dispatch._runcatch() does not flush outputs if exception is not |
|
352 | 352 | # handled by dispatch._dispatch() |
|
353 | 353 | self.ui.flush() |
|
354 | 354 | self._restoreio() |
|
355 | 355 | |
|
356 | 356 | def attachio(self): |
|
357 | 357 | """Attach to client's stdio passed via unix domain socket; all |
|
358 | 358 | channels except cresult will no longer be used |
|
359 | 359 | """ |
|
360 | 360 | # tell client to sendmsg() with 1-byte payload, which makes it |
|
361 | 361 | # distinctive from "attachio\n" command consumed by client.read() |
|
362 | 362 | self.clientsock.sendall(struct.pack('>cI', 'I', 1)) |
|
363 | 363 | clientfds = osutil.recvfds(self.clientsock.fileno()) |
|
364 | 364 | _log('received fds: %r\n' % clientfds) |
|
365 | 365 | |
|
366 | 366 | ui = self.ui |
|
367 | 367 | ui.flush() |
|
368 | 368 | first = self._saveio() |
|
369 | 369 | for fd, (cn, fn, mode) in zip(clientfds, _iochannels): |
|
370 | 370 | assert fd > 0 |
|
371 | 371 | fp = getattr(ui, fn) |
|
372 | 372 | os.dup2(fd, fp.fileno()) |
|
373 | 373 | os.close(fd) |
|
374 | 374 | if not first: |
|
375 | 375 | continue |
|
376 | 376 | # reset buffering mode when the client is first attached. because we want

377 | 377 | # to see output immediately on the pager, the mode stays unchanged

378 | 378 | # when the client re-attaches. ferr is unchanged because it should

379 | 379 | # be unbuffered regardless of whether it is a tty.
|
380 | 380 | if fn == 'ferr': |
|
381 | 381 | newfp = fp |
|
382 | 382 | else: |
|
383 | 383 | # make it line buffered explicitly because the default is |
|
384 | 384 | # decided on first write(), where fout could be a pager. |
|
385 | 385 | if fp.isatty(): |
|
386 | 386 | bufsize = 1 # line buffered |
|
387 | 387 | else: |
|
388 | 388 | bufsize = -1 # system default |
|
389 | 389 | newfp = os.fdopen(fp.fileno(), mode, bufsize) |
|
390 | 390 | setattr(ui, fn, newfp) |
|
391 | 391 | setattr(self, cn, newfp) |
|
392 | 392 | |
|
393 | 393 | self.cresult.write(struct.pack('>i', len(clientfds))) |
|
394 | 394 | |
|
395 | 395 | def _saveio(self): |
|
396 | 396 | if self._oldios: |
|
397 | 397 | return False |
|
398 | 398 | ui = self.ui |
|
399 | 399 | for cn, fn, _mode in _iochannels: |
|
400 | 400 | ch = getattr(self, cn) |
|
401 | 401 | fp = getattr(ui, fn) |
|
402 | 402 | fd = os.dup(fp.fileno()) |
|
403 | 403 | self._oldios.append((ch, fp, fd)) |
|
404 | 404 | return True |
|
405 | 405 | |
|
406 | 406 | def _restoreio(self): |
|
407 | 407 | ui = self.ui |
|
408 | 408 | for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels): |
|
409 | 409 | newfp = getattr(ui, fn) |
|
410 | 410 | # close newfp while it's associated with client; otherwise it |
|
411 | 411 | # would be closed when newfp is deleted |
|
412 | 412 | if newfp is not fp: |
|
413 | 413 | newfp.close() |
|
414 | 414 | # restore original fd: fp is open again |
|
415 | 415 | os.dup2(fd, fp.fileno()) |
|
416 | 416 | os.close(fd) |
|
417 | 417 | setattr(self, cn, ch) |
|
418 | 418 | setattr(ui, fn, fp) |
|
419 | 419 | del self._oldios[:] |
|
420 | 420 | |
|
421 | 421 | def validate(self): |
|
422 | 422 | """Reload the config and check if the server is up to date |
|
423 | 423 | |
|
424 | 424 | Read a list of '\0' separated arguments. |
|
425 | 425 | Write a non-empty list of '\0' separated instruction strings or '\0' |
|
426 | 426 | if the list is empty. |
|
427 | 427 | An instruction string could be either: |
|
428 | 428 | - "unlink $path", the client should unlink the path to stop the |
|
429 | 429 | outdated server. |
|
430 | 430 | - "redirect $path", the client should attempt to connect to $path |
|
431 | 431 | first. If it does not work, start a new server. It implies |
|
432 | 432 | "reconnect". |
|
433 | 433 | - "exit $n", the client should exit directly with code n. |
|
434 | 434 | This may happen if we cannot parse the config. |
|
435 | 435 | - "reconnect", the client should close the connection and |
|
436 | 436 | reconnect. |
|
437 | 437 | If neither "reconnect" nor "redirect" is included in the instruction |
|
438 | 438 | list, the client can continue with this server after completing all |
|
439 | 439 | the instructions. |
|
440 | 440 | """ |
|
441 | 441 | args = self._readlist() |
|
442 | 442 | try: |
|
443 | 443 | self.ui, lui = _loadnewui(self.ui, args) |
|
444 | 444 | except error.ParseError as inst: |
|
445 | 445 | dispatch._formatparse(self.ui.warn, inst) |
|
446 | 446 | self.ui.flush() |
|
447 | 447 | self.cresult.write('exit 255') |
|
448 | 448 | return |
|
449 | 449 | newhash = hashstate.fromui(lui, self.hashstate.mtimepaths) |
|
450 | 450 | insts = [] |
|
451 | 451 | if newhash.mtimehash != self.hashstate.mtimehash: |
|
452 | 452 | addr = _hashaddress(self.baseaddress, self.hashstate.confighash) |
|
453 | 453 | insts.append('unlink %s' % addr) |
|
454 | 454 | # mtimehash is empty if one or more extensions fail to load. |
|
455 | 455 | # to be compatible with hg, still serve the client this time. |
|
456 | 456 | if self.hashstate.mtimehash: |
|
457 | 457 | insts.append('reconnect') |
|
458 | 458 | if newhash.confighash != self.hashstate.confighash: |
|
459 | 459 | addr = _hashaddress(self.baseaddress, newhash.confighash) |
|
460 | 460 | insts.append('redirect %s' % addr) |
|
461 | 461 | _log('validate: %s\n' % insts) |
|
462 | 462 | self.cresult.write('\0'.join(insts) or '\0') |
|
463 | 463 | |
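
The instruction strings above are only ever interpreted by the chg client. As a rough sketch (assumed behaviour, not the actual client code), acting on the '\0'-separated reply could look like this, where unlinkquietly and connectto are hypothetical helpers:

    import sys

    def handleinstructions(data, unlinkquietly, connectto):
        reconnect = False
        for inst in (s for s in data.split('\0') if s):
            if inst.startswith('unlink '):
                unlinkquietly(inst[len('unlink '):])   # drop the outdated server
            elif inst.startswith('redirect '):
                connectto(inst[len('redirect '):])     # try the new address first
                reconnect = True
            elif inst.startswith('exit '):
                sys.exit(int(inst[len('exit '):]))     # e.g. config parse error
            elif inst == 'reconnect':
                reconnect = True
        return reconnect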
|
464 | 464 | def chdir(self): |
|
465 | 465 | """Change current directory |
|
466 | 466 | |
|
467 | 467 | Note that the behavior of the --cwd option is a bit different from this.
|
468 | 468 | It does not affect --config parameter. |
|
469 | 469 | """ |
|
470 | 470 | path = self._readstr() |
|
471 | 471 | if not path: |
|
472 | 472 | return |
|
473 | 473 | _log('chdir to %r\n' % path) |
|
474 | 474 | os.chdir(path) |
|
475 | 475 | |
|
476 | 476 | def setumask(self): |
|
477 | 477 | """Change umask""" |
|
478 | 478 | mask = struct.unpack('>I', self._read(4))[0] |
|
479 | 479 | _log('setumask %r\n' % mask) |
|
480 | 480 | os.umask(mask) |
|
481 | 481 | |
|
482 | 482 | def getpager(self): |
|
483 | 483 | """Read cmdargs and write pager command to r-channel if enabled |
|
484 | 484 | |
|
485 | 485 | If pager isn't enabled, this writes '\0' because channeledoutput |
|
486 | 486 | does not allow writing empty data.
|
487 | 487 | """ |
|
488 | 488 | args = self._readlist() |
|
489 | 489 | try: |
|
490 | 490 | cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui, |
|
491 | 491 | args) |
|
492 | 492 | except (error.Abort, error.AmbiguousCommand, error.CommandError, |
|
493 | 493 | error.UnknownCommand): |
|
494 | 494 | cmd = None |
|
495 | 495 | options = {} |
|
496 | 496 | if not cmd or 'pager' not in options: |
|
497 | 497 | self.cresult.write('\0') |
|
498 | 498 | return |
|
499 | 499 | |
|
500 | 500 | pagercmd = _setuppagercmd(self.ui, options, cmd) |
|
501 | 501 | if pagercmd: |
|
502 | 502 | # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so |
|
503 | 503 | # we can exit if the pipe to the pager is closed |
|
504 | 504 | if util.safehasattr(signal, 'SIGPIPE') and \ |
|
505 | 505 | signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN: |
|
506 | 506 | signal.signal(signal.SIGPIPE, signal.SIG_DFL) |
|
507 | 507 | self.cresult.write(pagercmd) |
|
508 | 508 | else: |
|
509 | 509 | self.cresult.write('\0') |
|
510 | 510 | |
|
511 | 511 | def setenv(self): |
|
512 | 512 | """Clear and update os.environ |
|
513 | 513 | |
|
514 | 514 | Note that not all variables take effect on the running process.
|
515 | 515 | """ |
|
516 | 516 | l = self._readlist() |
|
517 | 517 | try: |
|
518 | 518 | newenv = dict(s.split('=', 1) for s in l) |
|
519 | 519 | except ValueError: |
|
520 | 520 | raise ValueError('unexpected value in setenv request') |
|
521 | 521 | _log('setenv: %r\n' % sorted(newenv.keys())) |
|
522 | 522 | os.environ.clear() |
|
523 | 523 | os.environ.update(newenv) |
|
524 | 524 | |
|
525 | 525 | capabilities = commandserver.server.capabilities.copy() |
|
526 | 526 | capabilities.update({'attachio': attachio, |
|
527 | 527 | 'chdir': chdir, |
|
528 | 528 | 'getpager': getpager, |
|
529 | 529 | 'setenv': setenv, |
|
530 | 530 | 'setumask': setumask}) |
|
531 | 531 | |
|
532 | 532 | def _tempaddress(address): |
|
533 | 533 | return '%s.%d.tmp' % (address, os.getpid()) |
|
534 | 534 | |
|
535 | 535 | def _hashaddress(address, hashstr): |
|
536 | 536 | return '%s-%s' % (address, hashstr) |
|
537 | 537 | |
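
For illustration, the two helpers combine the base socket address with a pid or config hash; all values below are hypothetical:

    >>> _hashaddress('/tmp/chg1000/server', '8f3e2a1b')
    '/tmp/chg1000/server-8f3e2a1b'
    >>> _tempaddress('/tmp/chg1000/server-8f3e2a1b')   # with os.getpid() == 4242
    '/tmp/chg1000/server-8f3e2a1b.4242.tmp'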
|
538 | 538 | class chgunixservicehandler(object): |
|
539 | 539 | """Set of operations for chg services""" |
|
540 | 540 | |
|
541 | 541 | pollinterval = 1 # [sec] |
|
542 | 542 | |
|
543 | 543 | def __init__(self, ui): |
|
544 | 544 | self.ui = ui |
|
545 | 545 | self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600) |
|
546 | 546 | self._lastactive = time.time() |
|
547 | 547 | |
|
548 | 548 | def bindsocket(self, sock, address): |
|
549 | 549 | self._inithashstate(address) |
|
550 | 550 | self._checkextensions() |
|
551 | 551 | self._bind(sock) |
|
552 | 552 | self._createsymlink() |
|
553 | 553 | |
|
554 | 554 | def _inithashstate(self, address): |
|
555 | 555 | self._baseaddress = address |
|
556 | 556 | if self.ui.configbool('chgserver', 'skiphash', False): |
|
557 | 557 | self._hashstate = None |
|
558 | 558 | self._realaddress = address |
|
559 | 559 | return |
|
560 | 560 | self._hashstate = hashstate.fromui(self.ui) |
|
561 | 561 | self._realaddress = _hashaddress(address, self._hashstate.confighash) |
|
562 | 562 | |
|
563 | 563 | def _checkextensions(self): |
|
564 | 564 | if not self._hashstate: |
|
565 | 565 | return |
|
566 | 566 | if extensions.notloaded(): |
|
567 | 567 | # one or more extensions failed to load. mtimehash becomes |
|
568 | 568 | # meaningless because we do not know the paths of those extensions. |
|
569 | 569 | # set mtimehash to an illegal hash value to invalidate the server. |
|
570 | 570 | self._hashstate.mtimehash = '' |
|
571 | 571 | |
|
572 | 572 | def _bind(self, sock): |
|
573 | 573 | # use a unique temp address so we can stat the file and do ownership |
|
574 | 574 | # check later |
|
575 | 575 | tempaddress = _tempaddress(self._realaddress) |
|
576 | 576 | util.bindunixsocket(sock, tempaddress) |
|
577 | 577 | self._socketstat = os.stat(tempaddress) |
|
578 | 578 | # rename atomically replaces the old socket file if one exists. the

579 | 579 | # old server will detect the ownership change and exit.
|
580 | 580 | util.rename(tempaddress, self._realaddress) |
|
581 | 581 | |
|
582 | 582 | def _createsymlink(self): |
|
583 | 583 | if self._baseaddress == self._realaddress: |
|
584 | 584 | return |
|
585 | 585 | tempaddress = _tempaddress(self._baseaddress) |
|
586 | 586 | os.symlink(os.path.basename(self._realaddress), tempaddress) |
|
587 | 587 | util.rename(tempaddress, self._baseaddress) |
|
588 | 588 | |
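
_bind and _createsymlink rely on the same trick: create the file under a temporary name, then rename it over the final name so the replacement is atomic. A standalone sketch of the idea using plain stdlib calls (the real code goes through util.bindunixsocket and util.rename for portability):

    import os
    import socket

    def bindatomically(address):
        tmp = '%s.%d.tmp' % (address, os.getpid())
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.bind(tmp)
        socketstat = os.stat(tmp)   # kept around for later ownership checks
        os.rename(tmp, address)     # atomically replaces any older socket file
        return sock, socketstat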
|
589 | 589 | def _issocketowner(self): |
|
590 | 590 | try: |
|
591 | 591 | stat = os.stat(self._realaddress) |
|
592 | 592 | return (stat.st_ino == self._socketstat.st_ino and |
|
593 | 593 | stat.st_mtime == self._socketstat.st_mtime) |
|
594 | 594 | except OSError: |
|
595 | 595 | return False |
|
596 | 596 | |
|
597 | 597 | def unlinksocket(self, address): |
|
598 | 598 | if not self._issocketowner(): |
|
599 | 599 | return |
|
600 | 600 | # it is possible to have a race condition here that we may |
|
601 | 601 | # remove another server's socket file. but that's okay |
|
602 | 602 | # since that server will detect and exit automatically and |
|
603 | 603 | # the client will start a new server on demand. |
|
604 | 604 | try: |
|
605 | 605 | os.unlink(self._realaddress) |
|
606 | 606 | except OSError as exc: |
|
607 | 607 | if exc.errno != errno.ENOENT: |
|
608 | 608 | raise |
|
609 | 609 | |
|
610 | 610 | def printbanner(self, address): |
|
611 | 611 | # no "listening at" message should be printed to simulate hg behavior |
|
612 | 612 | pass |
|
613 | 613 | |
|
614 | 614 | def shouldexit(self): |
|
615 | 615 | if not self._issocketowner(): |
|
616 | 616 | self.ui.debug('%s is not owned, exiting.\n' % self._realaddress) |
|
617 | 617 | return True |
|
618 | 618 | if time.time() - self._lastactive > self._idletimeout: |
|
619 | 619 | self.ui.debug('being idle too long. exiting.\n') |
|
620 | 620 | return True |
|
621 | 621 | return False |
|
622 | 622 | |
|
623 | 623 | def newconnection(self): |
|
624 | 624 | self._lastactive = time.time() |
|
625 | 625 | |
|
626 | 626 | def createcmdserver(self, repo, conn, fin, fout): |
|
627 | 627 | return chgcmdserver(self.ui, repo, fin, fout, conn, |
|
628 | 628 | self._hashstate, self._baseaddress) |
|
629 | 629 | |
|
630 | 630 | def chgunixservice(ui, repo, opts): |
|
631 | 631 | if repo: |
|
632 | # one chgserver can serve multiple repos. drop repo infomation | |
|
632 | # one chgserver can serve multiple repos. drop repo information | |
|
633 | 633 | ui.setconfig('bundle', 'mainreporoot', '', 'repo') |
|
634 | 634 | h = chgunixservicehandler(ui) |
|
635 | 635 | return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h) |
|
636 | 636 | |
|
637 | 637 | def uisetup(ui): |
|
638 | 638 | commandserver._servicemap['chgunix'] = chgunixservice |
|
639 | 639 | |
|
640 | 640 | # CHGINTERNALMARK is temporarily set by chg client to detect if chg will |
|
641 | 641 | # start another chg. drop it to avoid possible side effects. |
|
642 | 642 | if 'CHGINTERNALMARK' in os.environ: |
|
643 | 643 | del os.environ['CHGINTERNALMARK'] |
@@ -1,695 +1,695
|
1 | 1 | # __init__.py - fsmonitor initialization and overrides |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013-2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''Faster status operations with the Watchman file monitor (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | Integrates the file-watching program Watchman with Mercurial to produce faster |
|
11 | 11 | status results. |
|
12 | 12 | |
|
13 | 13 | On a particular Linux system, for a real-world repository with over 400,000 |
|
14 | 14 | files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same |
|
15 | 15 | system, with fsmonitor it takes about 0.3 seconds. |
|
16 | 16 | |
|
17 | 17 | fsmonitor requires no configuration -- it will tell Watchman about your |
|
18 | 18 | repository as necessary. You'll need to install Watchman from |
|
19 | 19 | https://facebook.github.io/watchman/ and make sure it is in your PATH. |
|
20 | 20 | |
|
21 | 21 | The following configuration options exist: |
|
22 | 22 | |
|
23 | 23 | :: |
|
24 | 24 | |
|
25 | 25 | [fsmonitor] |
|
26 | 26 | mode = {off, on, paranoid} |
|
27 | 27 | |
|
28 | 28 | When `mode = off`, fsmonitor will disable itself (similar to not loading the |
|
29 | 29 | extension at all). When `mode = on`, fsmonitor will be enabled (the default). |
|
30 | 30 | When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, |
|
31 | 31 | and ensure that the results are consistent. |
|
32 | 32 | |
|
33 | 33 | :: |
|
34 | 34 | |
|
35 | 35 | [fsmonitor] |
|
36 | 36 | timeout = (float) |
|
37 | 37 | |
|
38 | 38 | A value, in seconds, that determines how long fsmonitor will wait for Watchman |
|
39 | 39 | to return results. Defaults to `2.0`. |
|
40 | 40 | |
|
41 | 41 | :: |
|
42 | 42 | |
|
43 | 43 | [fsmonitor] |
|
44 | 44 | blacklistusers = (list of userids) |
|
45 | 45 | |
|
46 | 46 | A list of usernames for which fsmonitor will disable itself altogether. |
|
47 | 47 | |
|
48 | 48 | :: |
|
49 | 49 | |
|
50 | 50 | [fsmonitor] |
|
51 | 51 | walk_on_invalidate = (boolean) |
|
52 | 52 | |
|
53 | 53 | Whether or not to walk the whole repo ourselves when our cached state has been |
|
54 | 54 | invalidated, for example when Watchman has been restarted or .hgignore rules |
|
55 | 55 | have been changed. Walking the repo in that case can result in competing for |
|
56 | 56 | I/O with Watchman. For large repos it is recommended to set this value to |
|
57 | 57 | false. You may wish to set this to true if you have a very fast filesystem |
|
58 | 58 | that can outpace the IPC overhead of getting the result data for the full repo |
|
59 | 59 | from Watchman. Defaults to false. |
|
60 | 60 | |
|
61 | 61 | fsmonitor is incompatible with the largefiles and eol extensions, and |
|
62 | 62 | will disable itself if any of those are active. |
|
63 | 63 | |
|
64 | 64 | ''' |
|
65 | 65 | |
|
66 | 66 | # Platforms Supported |
|
67 | 67 | # =================== |
|
68 | 68 | # |
|
69 | 69 | # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, |
|
70 | 70 | # even under severe loads. |
|
71 | 71 | # |
|
72 | 72 | # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor |
|
73 | 73 | # turned on, on case-insensitive HFS+. There has been a reasonable amount of |
|
74 | 74 | # user testing under normal loads. |
|
75 | 75 | # |
|
76 | 76 | # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but |
|
77 | 77 | # very little testing has been done. |
|
78 | 78 | # |
|
79 | 79 | # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. |
|
80 | 80 | # |
|
81 | 81 | # Known Issues |
|
82 | 82 | # ============ |
|
83 | 83 | # |
|
84 | 84 | # * fsmonitor will disable itself if any of the following extensions are |
|
85 | 85 | # enabled: largefiles, inotify, eol; or if the repository has subrepos. |
|
86 | 86 | # * fsmonitor will produce incorrect results if nested repos that are not |
|
87 | 87 | # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. |
|
88 | 88 | # |
|
89 | 89 | # The issues related to nested repos and subrepos are probably not fundamental |
|
90 | 90 | # ones. Patches to fix them are welcome. |
|
91 | 91 | |
|
92 | 92 | from __future__ import absolute_import |
|
93 | 93 | |
|
94 | 94 | import hashlib |
|
95 | 95 | import os |
|
96 | 96 | import stat |
|
97 | 97 | import sys |
|
98 | 98 | |
|
99 | 99 | from mercurial.i18n import _ |
|
100 | 100 | from mercurial import ( |
|
101 | 101 | context, |
|
102 | 102 | extensions, |
|
103 | 103 | localrepo, |
|
104 | 104 | merge, |
|
105 | 105 | pathutil, |
|
106 | 106 | scmutil, |
|
107 | 107 | util, |
|
108 | 108 | ) |
|
109 | 109 | from mercurial import match as matchmod |
|
110 | 110 | |
|
111 | 111 | from . import ( |
|
112 | 112 | state, |
|
113 | 113 | watchmanclient, |
|
114 | 114 | ) |
|
115 | 115 | |
|
116 | 116 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
117 | 117 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
118 | 118 | # be specifying the version(s) of Mercurial they are tested with, or |
|
119 | 119 | # leave the attribute unspecified. |
|
120 | 120 | testedwith = 'ships-with-hg-core' |
|
121 | 121 | |
|
122 | 122 | # This extension is incompatible with the following blacklisted extensions |
|
123 | 123 | # and will disable itself when encountering one of these: |
|
124 | 124 | _blacklist = ['largefiles', 'eol'] |
|
125 | 125 | |
|
126 | 126 | def _handleunavailable(ui, state, ex): |
|
127 | 127 | """Exception handler for Watchman interaction exceptions""" |
|
128 | 128 | if isinstance(ex, watchmanclient.Unavailable): |
|
129 | 129 | if ex.warn: |
|
130 | 130 | ui.warn(str(ex) + '\n') |
|
131 | 131 | if ex.invalidate: |
|
132 | 132 | state.invalidate() |
|
133 | 133 | ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg) |
|
134 | 134 | else: |
|
135 | 135 | ui.log('fsmonitor', 'Watchman exception: %s\n', ex) |
|
136 | 136 | |
|
137 | 137 | def _hashignore(ignore): |
|
138 | 138 | """Calculate hash for ignore patterns and filenames |
|
139 | 139 | |
|
140 | 140 | If this information changes between Mercurial invocations, we can't |
|
141 | 141 | rely on Watchman information anymore and have to re-scan the working |
|
142 | 142 | copy. |
|
143 | 143 | |
|
144 | 144 | """ |
|
145 | 145 | sha1 = hashlib.sha1() |
|
146 | 146 | if util.safehasattr(ignore, 'includepat'): |
|
147 | 147 | sha1.update(ignore.includepat) |
|
148 | 148 | sha1.update('\0\0') |
|
149 | 149 | if util.safehasattr(ignore, 'excludepat'): |
|
150 | 150 | sha1.update(ignore.excludepat) |
|
151 | 151 | sha1.update('\0\0') |
|
152 | 152 | if util.safehasattr(ignore, 'patternspat'): |
|
153 | 153 | sha1.update(ignore.patternspat) |
|
154 | 154 | sha1.update('\0\0') |
|
155 | 155 | if util.safehasattr(ignore, '_files'): |
|
156 | 156 | for f in ignore._files: |
|
157 | 157 | sha1.update(f) |
|
158 | 158 | sha1.update('\0') |
|
159 | 159 | return sha1.hexdigest() |
|
160 | 160 | |
|
161 | 161 | def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True): |
|
162 | 162 | '''Replacement for dirstate.walk, hooking into Watchman. |
|
163 | 163 | |
|
164 | 164 | Whenever full is False, ignored is False, and the Watchman client is |
|
165 | 165 | available, use Watchman combined with saved state to possibly return only a |
|
166 | 166 | subset of files.''' |
|
167 | 167 | def bail(): |
|
168 | 168 | return orig(match, subrepos, unknown, ignored, full=True) |
|
169 | 169 | |
|
170 | 170 | if full or ignored or not self._watchmanclient.available(): |
|
171 | 171 | return bail() |
|
172 | 172 | state = self._fsmonitorstate |
|
173 | 173 | clock, ignorehash, notefiles = state.get() |
|
174 | 174 | if not clock: |
|
175 | 175 | if state.walk_on_invalidate: |
|
176 | 176 | return bail() |
|
177 | 177 | # Initial NULL clock value, see |
|
178 | 178 | # https://facebook.github.io/watchman/docs/clockspec.html |
|
179 | 179 | clock = 'c:0:0' |
|
180 | 180 | notefiles = [] |
|
181 | 181 | |
|
182 | 182 | def fwarn(f, msg): |
|
183 | 183 | self._ui.warn('%s: %s\n' % (self.pathto(f), msg)) |
|
184 | 184 | return False |
|
185 | 185 | |
|
186 | 186 | def badtype(mode): |
|
187 | 187 | kind = _('unknown') |
|
188 | 188 | if stat.S_ISCHR(mode): |
|
189 | 189 | kind = _('character device') |
|
190 | 190 | elif stat.S_ISBLK(mode): |
|
191 | 191 | kind = _('block device') |
|
192 | 192 | elif stat.S_ISFIFO(mode): |
|
193 | 193 | kind = _('fifo') |
|
194 | 194 | elif stat.S_ISSOCK(mode): |
|
195 | 195 | kind = _('socket') |
|
196 | 196 | elif stat.S_ISDIR(mode): |
|
197 | 197 | kind = _('directory') |
|
198 | 198 | return _('unsupported file type (type is %s)') % kind |
|
199 | 199 | |
|
200 | 200 | ignore = self._ignore |
|
201 | 201 | dirignore = self._dirignore |
|
202 | 202 | if unknown: |
|
203 | 203 | if _hashignore(ignore) != ignorehash and clock != 'c:0:0': |
|
204 | 204 | # ignore list changed -- can't rely on Watchman state any more |
|
205 | 205 | if state.walk_on_invalidate: |
|
206 | 206 | return bail() |
|
207 | 207 | notefiles = [] |
|
208 | 208 | clock = 'c:0:0' |
|
209 | 209 | else: |
|
210 | 210 | # always ignore |
|
211 | 211 | ignore = util.always |
|
212 | 212 | dirignore = util.always |
|
213 | 213 | |
|
214 | 214 | matchfn = match.matchfn |
|
215 | 215 | matchalways = match.always() |
|
216 | 216 | dmap = self._map |
|
217 | 217 | nonnormalset = getattr(self, '_nonnormalset', None) |
|
218 | 218 | |
|
219 | 219 | copymap = self._copymap |
|
220 | 220 | getkind = stat.S_IFMT |
|
221 | 221 | dirkind = stat.S_IFDIR |
|
222 | 222 | regkind = stat.S_IFREG |
|
223 | 223 | lnkkind = stat.S_IFLNK |
|
224 | 224 | join = self._join |
|
225 | 225 | normcase = util.normcase |
|
226 | 226 | fresh_instance = False |
|
227 | 227 | |
|
228 | 228 | exact = skipstep3 = False |
|
229 | 229 | if matchfn == match.exact: # match.exact |
|
230 | 230 | exact = True |
|
231 | 231 | dirignore = util.always # skip step 2 |
|
232 | 232 | elif match.files() and not match.anypats(): # match.match, no patterns |
|
233 | 233 | skipstep3 = True |
|
234 | 234 | |
|
235 | 235 | if not exact and self._checkcase: |
|
236 | 236 | # note that even though we could receive directory entries, we're only |
|
237 | 237 | # interested in checking if a file with the same name exists. So only |
|
238 | 238 | # normalize files if possible. |
|
239 | 239 | normalize = self._normalizefile |
|
240 | 240 | skipstep3 = False |
|
241 | 241 | else: |
|
242 | 242 | normalize = None |
|
243 | 243 | |
|
244 | 244 | # step 1: find all explicit files |
|
245 | 245 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
246 | 246 | |
|
247 | 247 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
248 | 248 | work = [d for d in work if not dirignore(d[0])] |
|
249 | 249 | |
|
250 | 250 | if not work and (exact or skipstep3): |
|
251 | 251 | for s in subrepos: |
|
252 | 252 | del results[s] |
|
253 | 253 | del results['.hg'] |
|
254 | 254 | return results |
|
255 | 255 | |
|
256 | 256 | # step 2: query Watchman |
|
257 | 257 | try: |
|
258 | 258 | # Use the user-configured timeout for the query. |
|
259 | 259 | # Add a little slack over the top of the user query to allow for |
|
260 | 260 | # overheads while transferring the data |
|
261 | 261 | self._watchmanclient.settimeout(state.timeout + 0.1) |
|
262 | 262 | result = self._watchmanclient.command('query', { |
|
263 | 263 | 'fields': ['mode', 'mtime', 'size', 'exists', 'name'], |
|
264 | 264 | 'since': clock, |
|
265 | 265 | 'expression': [ |
|
266 | 266 | 'not', [ |
|
267 | 267 | 'anyof', ['dirname', '.hg'], |
|
268 | 268 | ['name', '.hg', 'wholename'] |
|
269 | 269 | ] |
|
270 | 270 | ], |
|
271 | 271 | 'sync_timeout': int(state.timeout * 1000), |
|
272 | 272 | 'empty_on_fresh_instance': state.walk_on_invalidate, |
|
273 | 273 | }) |
|
274 | 274 | except Exception as ex: |
|
275 | 275 | _handleunavailable(self._ui, state, ex) |
|
276 | 276 | self._watchmanclient.clearconnection() |
|
277 | 277 | return bail() |
|
278 | 278 | else: |
|
279 | 279 | # We need to propagate the last observed clock up so that we |
|
280 | 280 | # can use it for our next query |
|
281 | 281 | state.setlastclock(result['clock']) |
|
282 | 282 | if result['is_fresh_instance']: |
|
283 | 283 | if state.walk_on_invalidate: |
|
284 | 284 | state.invalidate() |
|
285 | 285 | return bail() |
|
286 | 286 | fresh_instance = True |
|
287 | 287 | # Ignore any prior notable files from the state info
|
288 | 288 | notefiles = [] |
|
289 | 289 | |
|
290 | 290 | # for file paths which require normalization and we encounter a case |
|
291 | 291 | # collision, we store our own foldmap |
|
292 | 292 | if normalize: |
|
293 | 293 | foldmap = dict((normcase(k), k) for k in results) |
|
294 | 294 | |
|
295 | 295 | switch_slashes = os.sep == '\\' |
|
296 | 296 | # The order of the results is, strictly speaking, undefined. |
|
297 | 297 | # For case changes on a case insensitive filesystem we may receive |
|
298 | 298 | # two entries, one with exists=True and another with exists=False. |
|
299 | 299 | # The exists=True entries in the same response should be interpreted |
|
300 | 300 | # as being happens-after the exists=False entries due to the way that |
|
301 | 301 | # Watchman tracks files. We use this property to reconcile deletes |
|
302 | 302 | # for name case changes. |
|
303 | 303 | for entry in result['files']: |
|
304 | 304 | fname = entry['name'] |
|
305 | 305 | if switch_slashes: |
|
306 | 306 | fname = fname.replace('\\', '/') |
|
307 | 307 | if normalize: |
|
308 | 308 | normed = normcase(fname) |
|
309 | 309 | fname = normalize(fname, True, True) |
|
310 | 310 | foldmap[normed] = fname |
|
311 | 311 | fmode = entry['mode'] |
|
312 | 312 | fexists = entry['exists'] |
|
313 | 313 | kind = getkind(fmode) |
|
314 | 314 | |
|
315 | 315 | if not fexists: |
|
316 | 316 | # if marked as deleted and we don't already have a change |
|
317 | 317 | # record, mark it as deleted. If we already have an entry |
|
318 | 318 | # for fname then it was either part of walkexplicit or was |
|
319 | 319 | # an earlier result that was a case change |
|
320 | 320 | if fname not in results and fname in dmap and ( |
|
321 | 321 | matchalways or matchfn(fname)): |
|
322 | 322 | results[fname] = None |
|
323 | 323 | elif kind == dirkind: |
|
324 | 324 | if fname in dmap and (matchalways or matchfn(fname)): |
|
325 | 325 | results[fname] = None |
|
326 | 326 | elif kind == regkind or kind == lnkkind: |
|
327 | 327 | if fname in dmap: |
|
328 | 328 | if matchalways or matchfn(fname): |
|
329 | 329 | results[fname] = entry |
|
330 | 330 | elif (matchalways or matchfn(fname)) and not ignore(fname): |
|
331 | 331 | results[fname] = entry |
|
332 | 332 | elif fname in dmap and (matchalways or matchfn(fname)): |
|
333 | 333 | results[fname] = None |
|
334 | 334 | |
|
335 | 335 | # step 3: query notable files we don't already know about |
|
336 | 336 | # XXX try not to iterate over the entire dmap |
|
337 | 337 | if normalize: |
|
338 | 338 | # any notable files that have changed case will already be handled |
|
339 | 339 | # above, so just check membership in the foldmap |
|
340 | 340 | notefiles = set((normalize(f, True, True) for f in notefiles |
|
341 | 341 | if normcase(f) not in foldmap)) |
|
342 | 342 | visit = set((f for f in notefiles if (f not in results and matchfn(f) |
|
343 | 343 | and (f in dmap or not ignore(f))))) |
|
344 | 344 | |
|
345 | 345 | if nonnormalset is not None and not fresh_instance: |
|
346 | 346 | if matchalways: |
|
347 | 347 | visit.update(f for f in nonnormalset if f not in results) |
|
348 | 348 | visit.update(f for f in copymap if f not in results) |
|
349 | 349 | else: |
|
350 | 350 | visit.update(f for f in nonnormalset |
|
351 | 351 | if f not in results and matchfn(f)) |
|
352 | 352 | visit.update(f for f in copymap |
|
353 | 353 | if f not in results and matchfn(f)) |
|
354 | 354 | else: |
|
355 | 355 | if matchalways: |
|
356 | 356 | visit.update(f for f, st in dmap.iteritems() |
|
357 | 357 | if (f not in results and |
|
358 | 358 | (st[2] < 0 or st[0] != 'n' or fresh_instance))) |
|
359 | 359 | visit.update(f for f in copymap if f not in results) |
|
360 | 360 | else: |
|
361 | 361 | visit.update(f for f, st in dmap.iteritems() |
|
362 | 362 | if (f not in results and |
|
363 | 363 | (st[2] < 0 or st[0] != 'n' or fresh_instance) |
|
364 | 364 | and matchfn(f))) |
|
365 | 365 | visit.update(f for f in copymap |
|
366 | 366 | if f not in results and matchfn(f)) |
|
367 | 367 | |
|
368 | 368 | audit = pathutil.pathauditor(self._root).check |
|
369 | 369 | auditpass = [f for f in visit if audit(f)] |
|
370 | 370 | auditpass.sort() |
|
371 | 371 | auditfail = visit.difference(auditpass) |
|
372 | 372 | for f in auditfail: |
|
373 | 373 | results[f] = None |
|
374 | 374 | |
|
375 | 375 | nf = iter(auditpass).next |
|
376 | 376 | for st in util.statfiles([join(f) for f in auditpass]): |
|
377 | 377 | f = nf() |
|
378 | 378 | if st or f in dmap: |
|
379 | 379 | results[f] = st |
|
380 | 380 | |
|
381 | 381 | for s in subrepos: |
|
382 | 382 | del results[s] |
|
383 | 383 | del results['.hg'] |
|
384 | 384 | return results |
|
385 | 385 | |
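
Outside of Mercurial, roughly the same step-2 query can be issued directly with the pywatchman package; a sketch in which the repository path and timeout are placeholders:

    import pywatchman

    client = pywatchman.client(timeout=2.0)
    watch = client.query('watch-project', '/path/to/repo')['watch']
    result = client.query('query', watch, {
        'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
        'since': 'c:0:0',   # NULL clock: report every tracked file
        'expression': ['not', ['anyof', ['dirname', '.hg'],
                               ['name', '.hg', 'wholename']]],
        'sync_timeout': 2000,   # milliseconds
    })
    # result['clock'] would be remembered and passed as 'since' next time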
|
386 | 386 | def overridestatus( |
|
387 | 387 | orig, self, node1='.', node2=None, match=None, ignored=False, |
|
388 | 388 | clean=False, unknown=False, listsubrepos=False): |
|
389 | 389 | listignored = ignored |
|
390 | 390 | listclean = clean |
|
391 | 391 | listunknown = unknown |
|
392 | 392 | |
|
393 | 393 | def _cmpsets(l1, l2): |
|
394 | 394 | try: |
|
395 | 395 | if 'FSMONITOR_LOG_FILE' in os.environ: |
|
396 | 396 | fn = os.environ['FSMONITOR_LOG_FILE'] |
|
397 | 397 | f = open(fn, 'wb') |
|
398 | 398 | else: |
|
399 | 399 | fn = 'fsmonitorfail.log' |
|
400 | 400 | f = self.opener(fn, 'wb') |
|
401 | 401 | except (IOError, OSError): |
|
402 | 402 | self.ui.warn(_('warning: unable to write to %s\n') % fn) |
|
403 | 403 | return |
|
404 | 404 | |
|
405 | 405 | try: |
|
406 | 406 | for i, (s1, s2) in enumerate(zip(l1, l2)): |
|
407 | 407 | if set(s1) != set(s2): |
|
408 | 408 | f.write('sets at position %d are unequal\n' % i) |
|
409 | 409 | f.write('watchman returned: %s\n' % s1) |
|
410 | 410 | f.write('stat returned: %s\n' % s2) |
|
411 | 411 | finally: |
|
412 | 412 | f.close() |
|
413 | 413 | |
|
414 | 414 | if isinstance(node1, context.changectx): |
|
415 | 415 | ctx1 = node1 |
|
416 | 416 | else: |
|
417 | 417 | ctx1 = self[node1] |
|
418 | 418 | if isinstance(node2, context.changectx): |
|
419 | 419 | ctx2 = node2 |
|
420 | 420 | else: |
|
421 | 421 | ctx2 = self[node2] |
|
422 | 422 | |
|
423 | 423 | working = ctx2.rev() is None |
|
424 | 424 | parentworking = working and ctx1 == self['.'] |
|
425 | 425 | match = match or matchmod.always(self.root, self.getcwd()) |
|
426 | 426 | |
|
427 | 427 | # Maybe we can use this opportunity to update Watchman's state. |
|
428 | 428 | # Mercurial uses workingcommitctx and/or memctx to represent the part of |
|
429 | 429 | # the workingctx that is to be committed. So don't update the state in |
|
430 | 430 | # that case. |
|
431 | 431 | # HG_PENDING is set in the environment when the dirstate is being updated |
|
432 | 432 | # in the middle of a transaction; we must not update our state in that |
|
433 | 433 | # case, or we risk forgetting about changes in the working copy. |
|
434 | 434 | updatestate = (parentworking and match.always() and |
|
435 | 435 | not isinstance(ctx2, (context.workingcommitctx, |
|
436 | 436 | context.memctx)) and |
|
437 | 437 | 'HG_PENDING' not in os.environ) |
|
438 | 438 | |
|
439 | 439 | try: |
|
440 | 440 | if self._fsmonitorstate.walk_on_invalidate: |
|
441 | 441 | # Use a short timeout to query the current clock. If that |
|
442 | 442 | # takes too long then we assume that the service will be slow |
|
443 | 443 | # to answer our query. |
|
444 | 444 | # walk_on_invalidate indicates that we prefer to walk the |
|
445 | 445 | # tree ourselves because we can ignore portions that Watchman |
|
446 | 446 | # cannot and we tend to be faster in the warmer buffer cache |
|
447 | 447 | # cases. |
|
448 | 448 | self._watchmanclient.settimeout(0.1) |
|
449 | 449 | else: |
|
450 | 450 | # Give Watchman more time to potentially complete its walk |
|
451 | 451 | # and return the initial clock. In this mode we assume that |
|
452 | 452 | # the filesystem will be slower than parsing a potentially |
|
453 | 453 | # very large Watchman result set. |
|
454 | 454 | self._watchmanclient.settimeout( |
|
455 | 455 | self._fsmonitorstate.timeout + 0.1) |
|
456 | 456 | startclock = self._watchmanclient.getcurrentclock() |
|
457 | 457 | except Exception as ex: |
|
458 | 458 | self._watchmanclient.clearconnection() |
|
459 | 459 | _handleunavailable(self.ui, self._fsmonitorstate, ex) |
|
460 | 460 | # boo, Watchman failed. bail |
|
461 | 461 | return orig(node1, node2, match, listignored, listclean, |
|
462 | 462 | listunknown, listsubrepos) |
|
463 | 463 | |
|
464 | 464 | if updatestate: |
|
465 | 465 | # We need info about unknown files. This may make things slower the |
|
466 | 466 | # first time, but whatever. |
|
467 | 467 | stateunknown = True |
|
468 | 468 | else: |
|
469 | 469 | stateunknown = listunknown |
|
470 | 470 | |
|
471 | 471 | r = orig(node1, node2, match, listignored, listclean, stateunknown, |
|
472 | 472 | listsubrepos) |
|
473 | 473 | modified, added, removed, deleted, unknown, ignored, clean = r |
|
474 | 474 | |
|
475 | 475 | if updatestate: |
|
476 | 476 | notefiles = modified + added + removed + deleted + unknown |
|
477 | 477 | self._fsmonitorstate.set( |
|
478 | 478 | self._fsmonitorstate.getlastclock() or startclock, |
|
479 | 479 | _hashignore(self.dirstate._ignore), |
|
480 | 480 | notefiles) |
|
481 | 481 | |
|
482 | 482 | if not listunknown: |
|
483 | 483 | unknown = [] |
|
484 | 484 | |
|
485 | 485 | # don't do paranoid checks if we're not going to query Watchman anyway |
|
486 | 486 | full = listclean or match.traversedir is not None |
|
487 | 487 | if self._fsmonitorstate.mode == 'paranoid' and not full: |
|
488 | 488 | # run status again and fall back to the old walk this time |
|
489 | 489 | self.dirstate._fsmonitordisable = True |
|
490 | 490 | |
|
491 | 491 | # shut the UI up |
|
492 | 492 | quiet = self.ui.quiet |
|
493 | 493 | self.ui.quiet = True |
|
494 | 494 | fout, ferr = self.ui.fout, self.ui.ferr |
|
495 | 495 | self.ui.fout = self.ui.ferr = open(os.devnull, 'wb') |
|
496 | 496 | |
|
497 | 497 | try: |
|
498 | 498 | rv2 = orig( |
|
499 | 499 | node1, node2, match, listignored, listclean, listunknown, |
|
500 | 500 | listsubrepos) |
|
501 | 501 | finally: |
|
502 | 502 | self.dirstate._fsmonitordisable = False |
|
503 | 503 | self.ui.quiet = quiet |
|
504 | 504 | self.ui.fout, self.ui.ferr = fout, ferr |
|
505 | 505 | |
|
506 | 506 | # clean isn't tested since it's set to True above |
|
507 | 507 | _cmpsets([modified, added, removed, deleted, unknown, ignored, clean], |
|
508 | 508 | rv2) |
|
509 | 509 | modified, added, removed, deleted, unknown, ignored, clean = rv2 |
|
510 | 510 | |
|
511 | 511 | return scmutil.status( |
|
512 | 512 | modified, added, removed, deleted, unknown, ignored, clean) |
|
513 | 513 | |
|
514 | 514 | def makedirstate(cls): |
|
515 | 515 | class fsmonitordirstate(cls): |
|
516 | 516 | def _fsmonitorinit(self, fsmonitorstate, watchmanclient): |
|
517 | 517 | # _fsmonitordisable is used in paranoid mode |
|
518 | 518 | self._fsmonitordisable = False |
|
519 | 519 | self._fsmonitorstate = fsmonitorstate |
|
520 | 520 | self._watchmanclient = watchmanclient |
|
521 | 521 | |
|
522 | 522 | def walk(self, *args, **kwargs): |
|
523 | 523 | orig = super(fsmonitordirstate, self).walk |
|
524 | 524 | if self._fsmonitordisable: |
|
525 | 525 | return orig(*args, **kwargs) |
|
526 | 526 | return overridewalk(orig, self, *args, **kwargs) |
|
527 | 527 | |
|
528 | 528 | def rebuild(self, *args, **kwargs): |
|
529 | 529 | self._fsmonitorstate.invalidate() |
|
530 | 530 | return super(fsmonitordirstate, self).rebuild(*args, **kwargs) |
|
531 | 531 | |
|
532 | 532 | def invalidate(self, *args, **kwargs): |
|
533 | 533 | self._fsmonitorstate.invalidate() |
|
534 | 534 | return super(fsmonitordirstate, self).invalidate(*args, **kwargs) |
|
535 | 535 | |
|
536 | 536 | return fsmonitordirstate |
|
537 | 537 | |
|
538 | 538 | def wrapdirstate(orig, self): |
|
539 | 539 | ds = orig(self) |
|
540 | 540 | # only override the dirstate when Watchman is available for the repo |
|
541 | 541 | if util.safehasattr(self, '_fsmonitorstate'): |
|
542 | 542 | ds.__class__ = makedirstate(ds.__class__) |
|
543 | 543 | ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient) |
|
544 | 544 | return ds |
|
545 | 545 | |
|
546 | 546 | def extsetup(ui): |
|
547 | 547 | wrapfilecache(localrepo.localrepository, 'dirstate', wrapdirstate) |
|
548 | 548 | if sys.platform == 'darwin': |
|
549 | 549 | # An assist for avoiding the dangling-symlink fsevents bug |
|
550 | 550 | extensions.wrapfunction(os, 'symlink', wrapsymlink) |
|
551 | 551 | |
|
552 | 552 | extensions.wrapfunction(merge, 'update', wrapupdate) |
|
553 | 553 | |
|
554 | 554 | def wrapsymlink(orig, source, link_name): |
|
555 | 555 | ''' if we create a dangling symlink, also touch the parent dir |
|
556 | 556 | to encourage fsevents notifications to work more correctly ''' |
|
557 | 557 | try: |
|
558 | 558 | return orig(source, link_name) |
|
559 | 559 | finally: |
|
560 | 560 | try: |
|
561 | 561 | os.utime(os.path.dirname(link_name), None) |
|
562 | 562 | except OSError: |
|
563 | 563 | pass |
|
564 | 564 | |
|
565 | 565 | class state_update(object): |
|
566 | ''' This context mana

566 | ''' This context manager is responsible for dispatching the state-enter
|
567 | 567 | and state-leave signals to the watchman service ''' |
|
568 | 568 | |
|
569 | 569 | def __init__(self, repo, node, distance, partial): |
|
570 | 570 | self.repo = repo |
|
571 | 571 | self.node = node |
|
572 | 572 | self.distance = distance |
|
573 | 573 | self.partial = partial |
|
574 | 574 | |
|
575 | 575 | def __enter__(self): |
|
576 | 576 | self._state('state-enter') |
|
577 | 577 | return self |
|
578 | 578 | |
|
579 | 579 | def __exit__(self, type_, value, tb): |
|
580 | 580 | status = 'ok' if type_ is None else 'failed' |
|
581 | 581 | self._state('state-leave', status=status) |
|
582 | 582 | |
|
583 | 583 | def _state(self, cmd, status='ok'): |
|
584 | 584 | if not util.safehasattr(self.repo, '_watchmanclient'): |
|
585 | 585 | return |
|
586 | 586 | try: |
|
587 | 587 | commithash = self.repo[self.node].hex() |
|
588 | 588 | self.repo._watchmanclient.command(cmd, { |
|
589 | 589 | 'name': 'hg.update', |
|
590 | 590 | 'metadata': { |
|
591 | 591 | # the target revision |
|
592 | 592 | 'rev': commithash, |
|
593 | 593 | # approximate number of commits between current and target |
|
594 | 594 | 'distance': self.distance, |
|
595 | 595 | # success/failure (only really meaningful for state-leave) |
|
596 | 596 | 'status': status, |
|
597 | 597 | # whether the working copy parent is changing |
|
598 | 598 | 'partial': self.partial, |
|
599 | 599 | }}) |
|
600 | 600 | except Exception as e: |
|
601 | 601 | # Swallow any errors; fire and forget |
|
602 | 602 | self.repo.ui.log( |
|
603 | 603 | 'watchman', 'Exception %s while running %s\n', e, cmd) |
|
604 | 604 | |
|
605 | 605 | # Bracket working copy updates with calls to the watchman state-enter |
|
606 | 606 | # and state-leave commands. This allows clients to perform more intelligent |
|
607 | 607 | # settling during bulk file change scenarios |
|
608 | 608 | # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling |
|
609 | 609 | def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None, |
|
610 | 610 | mergeancestor=False, labels=None, matcher=None, **kwargs): |
|
611 | 611 | |
|
612 | 612 | distance = 0 |
|
613 | 613 | partial = True |
|
614 | 614 | if matcher is None or matcher.always(): |
|
615 | 615 | partial = False |
|
616 | 616 | wc = repo[None] |
|
617 | 617 | parents = wc.parents() |
|
618 | 618 | if len(parents) == 2: |
|
619 | 619 | anc = repo.changelog.ancestor(parents[0].node(), parents[1].node()) |
|
620 | 620 | ancrev = repo[anc].rev() |
|
621 | 621 | distance = abs(repo[node].rev() - ancrev) |
|
622 | 622 | elif len(parents) == 1: |
|
623 | 623 | distance = abs(repo[node].rev() - parents[0].rev()) |
|
624 | 624 | |
|
625 | 625 | with state_update(repo, node, distance, partial): |
|
626 | 626 | return orig( |
|
627 | 627 | repo, node, branchmerge, force, ancestor, mergeancestor, |
|
628 | 628 | labels, matcher, *kwargs) |
|
629 | 629 | |
|
630 | 630 | def reposetup(ui, repo): |
|
631 | 631 | # We don't work with largefiles or inotify |
|
632 | 632 | exts = extensions.enabled() |
|
633 | 633 | for ext in _blacklist: |
|
634 | 634 | if ext in exts: |
|
635 | 635 | ui.warn(_('The fsmonitor extension is incompatible with the %s ' |
|
636 | 636 | 'extension and has been disabled.\n') % ext) |
|
637 | 637 | return |
|
638 | 638 | |
|
639 | 639 | if util.safehasattr(repo, 'dirstate'): |
|
640 | 640 | # We don't work with subrepos either. Note that we can get passed in |
|
641 | 641 | # e.g. a statichttprepo, which throws on trying to access the substate. |
|
642 | 642 | # XXX This sucks. |
|
643 | 643 | try: |
|
644 | 644 | # repo[None].substate can cause a dirstate parse, which is too

645 | 645 | # slow. Instead, look for a file called .hgsubstate,
|
646 | 646 | if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'): |
|
647 | 647 | return |
|
648 | 648 | except AttributeError: |
|
649 | 649 | return |
|
650 | 650 | |
|
651 | 651 | fsmonitorstate = state.state(repo) |
|
652 | 652 | if fsmonitorstate.mode == 'off': |
|
653 | 653 | return |
|
654 | 654 | |
|
655 | 655 | try: |
|
656 | 656 | client = watchmanclient.client(repo) |
|
657 | 657 | except Exception as ex: |
|
658 | 658 | _handleunavailable(ui, fsmonitorstate, ex) |
|
659 | 659 | return |
|
660 | 660 | |
|
661 | 661 | repo._fsmonitorstate = fsmonitorstate |
|
662 | 662 | repo._watchmanclient = client |
|
663 | 663 | |
|
664 | 664 | # at this point since fsmonitorstate wasn't present, repo.dirstate is |
|
665 | 665 | # not a fsmonitordirstate |
|
666 | 666 | repo.dirstate.__class__ = makedirstate(repo.dirstate.__class__) |
|
667 | 667 | # nuke the dirstate so that _fsmonitorinit and subsequent configuration |
|
668 | 668 | # changes take effect on it |
|
669 | 669 | del repo._filecache['dirstate'] |
|
670 | 670 | delattr(repo.unfiltered(), 'dirstate') |
|
671 | 671 | |
|
672 | 672 | class fsmonitorrepo(repo.__class__): |
|
673 | 673 | def status(self, *args, **kwargs): |
|
674 | 674 | orig = super(fsmonitorrepo, self).status |
|
675 | 675 | return overridestatus(orig, self, *args, **kwargs) |
|
676 | 676 | |
|
677 | 677 | repo.__class__ = fsmonitorrepo |
|
678 | 678 | |
|
679 | 679 | def wrapfilecache(cls, propname, wrapper): |
|
680 | 680 | """Wraps a filecache property. These can't be wrapped using the normal |
|
681 | 681 | wrapfunction. This should eventually go into upstream Mercurial. |
|
682 | 682 | """ |
|
683 | 683 | assert callable(wrapper) |
|
684 | 684 | for currcls in cls.__mro__: |
|
685 | 685 | if propname in currcls.__dict__: |
|
686 | 686 | origfn = currcls.__dict__[propname].func |
|
687 | 687 | assert callable(origfn) |
|
688 | 688 | def wrap(*args, **kwargs): |
|
689 | 689 | return wrapper(origfn, *args, **kwargs) |
|
690 | 690 | currcls.__dict__[propname].func = wrap |
|
691 | 691 | break |
|
692 | 692 | |
|
693 | 693 | if currcls is object: |
|
694 | 694 | raise AttributeError( |
|
695 | 695 | _("type '%s' has no property '%s'") % (cls, propname)) |
@@ -1,1603 +1,1603
|
1 | 1 | # histedit.py - interactive history editing for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2009 Augie Fackler <raf@durin42.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """interactive history editing |
|
8 | 8 | |
|
9 | 9 | With this extension installed, Mercurial gains one new command: histedit. Usage |
|
10 | 10 | is as follows, assuming the following history:: |
|
11 | 11 | |
|
12 | 12 | @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42 |
|
13 | 13 | | Add delta |
|
14 | 14 | | |
|
15 | 15 | o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42 |
|
16 | 16 | | Add gamma |
|
17 | 17 | | |
|
18 | 18 | o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42 |
|
19 | 19 | | Add beta |
|
20 | 20 | | |
|
21 | 21 | o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42 |
|
22 | 22 | Add alpha |
|
23 | 23 | |
|
24 | 24 | If you were to run ``hg histedit c561b4e977df``, you would see the following |
|
25 | 25 | file open in your editor:: |
|
26 | 26 | |
|
27 | 27 | pick c561b4e977df Add beta |
|
28 | 28 | pick 030b686bedc4 Add gamma |
|
29 | 29 | pick 7c2fd3b9020c Add delta |
|
30 | 30 | |
|
31 | 31 | # Edit history between c561b4e977df and 7c2fd3b9020c |
|
32 | 32 | # |
|
33 | 33 | # Commits are listed from least to most recent |
|
34 | 34 | # |
|
35 | 35 | # Commands: |
|
36 | 36 | # p, pick = use commit |
|
37 | 37 | # e, edit = use commit, but stop for amending |
|
38 | 38 | # f, fold = use commit, but combine it with the one above |
|
39 | 39 | # r, roll = like fold, but discard this commit's description |
|
40 | 40 | # d, drop = remove commit from history |
|
41 | 41 | # m, mess = edit commit message without changing commit content |
|
42 | 42 | # |
|
43 | 43 | |
|
44 | 44 | In this file, lines beginning with ``#`` are ignored. You must specify a rule |
|
45 | 45 | for each revision in your history. For example, if you had meant to add gamma |
|
46 | 46 | before beta, and then wanted to add delta in the same revision as beta, you |
|
47 | 47 | would reorganize the file to look like this:: |
|
48 | 48 | |
|
49 | 49 | pick 030b686bedc4 Add gamma |
|
50 | 50 | pick c561b4e977df Add beta |
|
51 | 51 | fold 7c2fd3b9020c Add delta |
|
52 | 52 | |
|
53 | 53 | # Edit history between c561b4e977df and 7c2fd3b9020c |
|
54 | 54 | # |
|
55 | 55 | # Commits are listed from least to most recent |
|
56 | 56 | # |
|
57 | 57 | # Commands: |
|
58 | 58 | # p, pick = use commit |
|
59 | 59 | # e, edit = use commit, but stop for amending |
|
60 | 60 | # f, fold = use commit, but combine it with the one above |
|
61 | 61 | # r, roll = like fold, but discard this commit's description |
|
62 | 62 | # d, drop = remove commit from history |
|
63 | 63 | # m, mess = edit commit message without changing commit content |
|
64 | 64 | # |
|
65 | 65 | |
|
66 | 66 | At which point you close the editor and ``histedit`` starts working. When you |
|
67 | 67 | specify a ``fold`` operation, ``histedit`` will open an editor when it folds |
|
68 | 68 | those revisions together, offering you a chance to clean up the commit message:: |
|
69 | 69 | |
|
70 | 70 | Add beta |
|
71 | 71 | *** |
|
72 | 72 | Add delta |
|
73 | 73 | |
|
74 | 74 | Edit the commit message to your liking, then close the editor. For |
|
75 | 75 | this example, let's assume that the commit message was changed to |
|
76 | 76 | ``Add beta and delta.`` After histedit has run and had a chance to |
|
77 | 77 | remove any old or temporary revisions it needed, the history looks |
|
78 | 78 | like this:: |
|
79 | 79 | |
|
80 | 80 | @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42 |
|
81 | 81 | | Add beta and delta. |
|
82 | 82 | | |
|
83 | 83 | o 1 081603921c3f 2009-04-27 18:04 -0500 durin42 |
|
84 | 84 | | Add gamma |
|
85 | 85 | | |
|
86 | 86 | o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42 |
|
87 | 87 | Add alpha |
|
88 | 88 | |
|
89 | 89 | Note that ``histedit`` does *not* remove any revisions (even its own temporary |
|
90 | 90 | ones) until after it has completed all the editing operations, so it will |
|
91 | 91 | probably perform several strip operations when it's done. For the above example, |
|
92 | 92 | it had to run strip twice. Strip can be slow depending on a variety of factors, |
|
93 | 93 | so you might need to be a little patient. You can choose to keep the original |
|
94 | 94 | revisions by passing the ``--keep`` flag. |
|
95 | 95 | |
|
96 | 96 | The ``edit`` operation will drop you back to a command prompt, |
|
97 | 97 | allowing you to edit files freely, or even use ``hg record`` to commit |
|
98 | 98 | some changes as a separate commit. When you're done, any remaining |
|
99 | 99 | uncommitted changes will be committed as well. When done, run ``hg |
|
100 | 100 | histedit --continue`` to finish this step. You'll be prompted for a |
|
101 | 101 | new commit message, but the default commit message will be the |
|
102 | 102 | original message for the ``edit`` ed revision. |
|
103 | 103 | |
|
104 | 104 | The ``message`` operation will give you a chance to revise a commit |
|
105 | 105 | message without changing the contents. It's a shortcut for doing |
|
106 | 106 | ``edit`` immediately followed by `hg histedit --continue``. |
|
107 | 107 | |
|
108 | 108 | If ``histedit`` encounters a conflict when moving a revision (while |
|
109 | 109 | handling ``pick`` or ``fold``), it'll stop in a similar manner to |
|
110 | 110 | ``edit`` with the difference that it won't prompt you for a commit |
|
111 | 111 | message when done. If you decide at this point that you don't like how |
|
112 | 112 | much work it will be to rearrange history, or that you made a mistake, |
|
113 | 113 | you can use ``hg histedit --abort`` to abandon the new changes you |
|
114 | 114 | have made and return to the state before you attempted to edit your |
|
115 | 115 | history. |
|
116 | 116 | |
|
117 | 117 | If we clone the histedit-ed example repository above and add four more |
|
118 | 118 | changes, such that we have the following history:: |
|
119 | 119 | |
|
120 | 120 | @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan |
|
121 | 121 | | Add theta |
|
122 | 122 | | |
|
123 | 123 | o 5 140988835471 2009-04-27 18:04 -0500 stefan |
|
124 | 124 | | Add eta |
|
125 | 125 | | |
|
126 | 126 | o 4 122930637314 2009-04-27 18:04 -0500 stefan |
|
127 | 127 | | Add zeta |
|
128 | 128 | | |
|
129 | 129 | o 3 836302820282 2009-04-27 18:04 -0500 stefan |
|
130 | 130 | | Add epsilon |
|
131 | 131 | | |
|
132 | 132 | o 2 989b4d060121 2009-04-27 18:04 -0500 durin42 |
|
133 | 133 | | Add beta and delta. |
|
134 | 134 | | |
|
135 | 135 | o 1 081603921c3f 2009-04-27 18:04 -0500 durin42 |
|
136 | 136 | | Add gamma |
|
137 | 137 | | |
|
138 | 138 | o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42 |
|
139 | 139 | Add alpha |
|
140 | 140 | |
|
141 | 141 | If you run ``hg histedit --outgoing`` on the clone then it is the same |
|
142 | 142 | as running ``hg histedit 836302820282``. If you need plan to push to a |
|
143 | 143 | repository that Mercurial does not detect to be related to the source |
|
144 | 144 | repo, you can add a ``--force`` option. |
|
145 | 145 | |
|
146 | 146 | Config |
|
147 | 147 | ------ |
|
148 | 148 | |
|
149 | 149 | Histedit rule lines are truncated to 80 characters by default. You |
|
150 | 150 | can customize this behavior by setting a different length in your |
|
151 | 151 | configuration file:: |
|
152 | 152 | |
|
153 | 153 | [histedit] |
|
154 | 154 | linelen = 120 # truncate rule lines at 120 characters |
|
155 | 155 | |
|
156 | 156 | ``hg histedit`` attempts to automatically choose an appropriate base |
|
157 | 157 | revision to use. To change which base revision is used, define a |
|
158 | 158 | revset in your configuration file:: |
|
159 | 159 | |
|
160 | 160 | [histedit] |
|
161 | 161 | defaultrev = only(.) & draft() |
|
162 | 162 | |
|
163 | 163 | By default each edited revision needs to be present in histedit commands. |
|
164 | 164 | To remove a revision you need to use the ``drop`` operation. You can configure
|
165 | 165 | the drop to be implicit for missing commits by adding:: |
|
166 | 166 | |
|
167 | 167 | [histedit] |
|
168 | 168 | dropmissing = True |
|
169 | 169 | |
|
170 | 170 | """ |
|
171 | 171 | |
|
172 | 172 | from __future__ import absolute_import |
|
173 | 173 | |
|
174 | 174 | import errno |
|
175 | 175 | import os |
|
176 | 176 | |
|
177 | 177 | from mercurial.i18n import _ |
|
178 | 178 | from mercurial import ( |
|
179 | 179 | bundle2, |
|
180 | 180 | cmdutil, |
|
181 | 181 | context, |
|
182 | 182 | copies, |
|
183 | 183 | destutil, |
|
184 | 184 | discovery, |
|
185 | 185 | error, |
|
186 | 186 | exchange, |
|
187 | 187 | extensions, |
|
188 | 188 | hg, |
|
189 | 189 | lock, |
|
190 | 190 | merge as mergemod, |
|
191 | 191 | node, |
|
192 | 192 | obsolete, |
|
193 | 193 | repair, |
|
194 | 194 | scmutil, |
|
195 | 195 | util, |
|
196 | 196 | ) |
|
197 | 197 | |
|
198 | 198 | pickle = util.pickle |
|
199 | 199 | release = lock.release |
|
200 | 200 | cmdtable = {} |
|
201 | 201 | command = cmdutil.command(cmdtable) |
|
202 | 202 | |
|
203 | 203 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
204 | 204 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
205 | 205 | # be specifying the version(s) of Mercurial they are tested with, or |
|
206 | 206 | # leave the attribute unspecified. |
|
207 | 207 | testedwith = 'ships-with-hg-core' |
|
208 | 208 | |
|
209 | 209 | actiontable = {} |
|
210 | 210 | primaryactions = set() |
|
211 | 211 | secondaryactions = set() |
|
212 | 212 | tertiaryactions = set() |
|
213 | 213 | internalactions = set() |
|
214 | 214 | |
|
215 | 215 | def geteditcomment(ui, first, last): |
|
216 | 216 | """ construct the editor comment |
|
217 | 217 | The comment includes:: |
|
218 | 218 | - an intro |
|
219 | 219 | - sorted primary commands |
|
220 | 220 | - sorted short commands |
|
221 | 221 | - sorted long commands |
|
222 | 222 | - additional hints |
|
223 | 223 | |
|
224 | 224 | Commands are only included once. |
|
225 | 225 | """ |
|
226 | 226 | intro = _("""Edit history between %s and %s |
|
227 | 227 | |
|
228 | 228 | Commits are listed from least to most recent |
|
229 | 229 | |
|
230 | 230 | You can reorder changesets by reordering the lines |
|
231 | 231 | |
|
232 | 232 | Commands: |
|
233 | 233 | """) |
|
234 | 234 | actions = [] |
|
235 | 235 | def addverb(v): |
|
236 | 236 | a = actiontable[v] |
|
237 | 237 | lines = a.message.split("\n") |
|
238 | 238 | if len(a.verbs): |
|
239 | 239 | v = ', '.join(sorted(a.verbs, key=lambda v: len(v))) |
|
240 | 240 | actions.append(" %s = %s" % (v, lines[0])) |
|
241 | 241 | actions.extend([' %s' for l in lines[1:]]) |
|
242 | 242 | |
|
243 | 243 | for v in ( |
|
244 | 244 | sorted(primaryactions) + |
|
245 | 245 | sorted(secondaryactions) + |
|
246 | 246 | sorted(tertiaryactions) |
|
247 | 247 | ): |
|
248 | 248 | addverb(v) |
|
249 | 249 | actions.append('') |
|
250 | 250 | |
|
251 | 251 | hints = [] |
|
252 | 252 | if ui.configbool('histedit', 'dropmissing'): |
|
253 | 253 | hints.append("Deleting a changeset from the list " |
|
254 | 254 | "will DISCARD it from the edited history!") |
|
255 | 255 | |
|
256 | 256 | lines = (intro % (first, last)).split('\n') + actions + hints |
|
257 | 257 | |
|
258 | 258 | return ''.join(['# %s\n' % l if l else '#\n' for l in lines]) |
|
259 | 259 | |
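# Illustrative sketch (editorial addition, not part of the original source):
# geteditcomment() above joins the intro, the sorted action list and any hints
# into '#'-prefixed lines, so the editor comment looks roughly like:
#
#   # Edit history between 252a1af424ad and 8ef592ce7cc4
#   #
#   # Commits are listed from least to most recent
#   #
#   # You can reorder changesets by reordering the lines
#   #
#   # Commands:
#   #
#   #  e, edit = use commit, but stop for amending
#   #  m, mess = edit commit message without changing commit content
#   #  p, pick = use commit
#   #  ...
#
# (the hashes are borrowed from the examples in the histedit command docstring
# further below; the exact spacing is determined by addverb() above)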
|
260 | 260 | class histeditstate(object): |
|
261 | 261 | def __init__(self, repo, parentctxnode=None, actions=None, keep=None, |
|
262 | 262 | topmost=None, replacements=None, lock=None, wlock=None): |
|
263 | 263 | self.repo = repo |
|
264 | 264 | self.actions = actions |
|
265 | 265 | self.keep = keep |
|
266 | 266 | self.topmost = topmost |
|
267 | 267 | self.parentctxnode = parentctxnode |
|
268 | 268 | self.lock = lock |
|
269 | 269 | self.wlock = wlock |
|
270 | 270 | self.backupfile = None |
|
271 | 271 | if replacements is None: |
|
272 | 272 | self.replacements = [] |
|
273 | 273 | else: |
|
274 | 274 | self.replacements = replacements |
|
275 | 275 | |
|
276 | 276 | def read(self): |
|
277 | 277 | """Load histedit state from disk and set fields appropriately.""" |
|
278 | 278 | try: |
|
279 | 279 | state = self.repo.vfs.read('histedit-state') |
|
280 | 280 | except IOError as err: |
|
281 | 281 | if err.errno != errno.ENOENT: |
|
282 | 282 | raise |
|
283 | 283 | cmdutil.wrongtooltocontinue(self.repo, _('histedit')) |
|
284 | 284 | |
|
285 | 285 | if state.startswith('v1\n'): |
|
286 | 286 | data = self._load() |
|
287 | 287 | parentctxnode, rules, keep, topmost, replacements, backupfile = data |
|
288 | 288 | else: |
|
289 | 289 | data = pickle.loads(state) |
|
290 | 290 | parentctxnode, rules, keep, topmost, replacements = data |
|
291 | 291 | backupfile = None |
|
292 | 292 | |
|
293 | 293 | self.parentctxnode = parentctxnode |
|
294 | 294 | rules = "\n".join(["%s %s" % (verb, rest) for [verb, rest] in rules]) |
|
295 | 295 | actions = parserules(rules, self) |
|
296 | 296 | self.actions = actions |
|
297 | 297 | self.keep = keep |
|
298 | 298 | self.topmost = topmost |
|
299 | 299 | self.replacements = replacements |
|
300 | 300 | self.backupfile = backupfile |
|
301 | 301 | |
|
302 | 302 | def write(self): |
|
303 | 303 | fp = self.repo.vfs('histedit-state', 'w') |
|
304 | 304 | fp.write('v1\n') |
|
305 | 305 | fp.write('%s\n' % node.hex(self.parentctxnode)) |
|
306 | 306 | fp.write('%s\n' % node.hex(self.topmost)) |
|
307 | 307 | fp.write('%s\n' % self.keep) |
|
308 | 308 | fp.write('%d\n' % len(self.actions)) |
|
309 | 309 | for action in self.actions: |
|
310 | 310 | fp.write('%s\n' % action.tostate()) |
|
311 | 311 | fp.write('%d\n' % len(self.replacements)) |
|
312 | 312 | for replacement in self.replacements: |
|
313 | 313 | fp.write('%s%s\n' % (node.hex(replacement[0]), ''.join(node.hex(r) |
|
314 | 314 | for r in replacement[1]))) |
|
315 | 315 | backupfile = self.backupfile |
|
316 | 316 | if not backupfile: |
|
317 | 317 | backupfile = '' |
|
318 | 318 | fp.write('%s\n' % backupfile) |
|
319 | 319 | fp.close() |
|
320 | 320 | |
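# Illustrative sketch (editorial addition, not part of the original source):
# as written by write() above and parsed by _load() below, a v1
# 'histedit-state' file is a plain text file of the form:
#
#   v1
#   <parentctxnode as 40-char hex>
#   <topmost as 40-char hex>
#   <keep flag, 'True' or 'False'>
#   <number of actions>
#   <verb>           \  repeated once per action
#   <node as hex>    /  (one tostate() entry spans two lines)
#   <number of replacements>
#   <replaced hex immediately followed by the hex of each successor>
#   <backup bundle path, possibly empty>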
|
321 | 321 | def _load(self): |
|
322 | 322 | fp = self.repo.vfs('histedit-state', 'r') |
|
323 | 323 | lines = [l[:-1] for l in fp.readlines()] |
|
324 | 324 | |
|
325 | 325 | index = 0 |
|
326 | 326 | lines[index] # version number |
|
327 | 327 | index += 1 |
|
328 | 328 | |
|
329 | 329 | parentctxnode = node.bin(lines[index]) |
|
330 | 330 | index += 1 |
|
331 | 331 | |
|
332 | 332 | topmost = node.bin(lines[index]) |
|
333 | 333 | index += 1 |
|
334 | 334 | |
|
335 | 335 | keep = lines[index] == 'True' |
|
336 | 336 | index += 1 |
|
337 | 337 | |
|
338 | 338 | # Rules |
|
339 | 339 | rules = [] |
|
340 | 340 | rulelen = int(lines[index]) |
|
341 | 341 | index += 1 |
|
342 | 342 | for i in xrange(rulelen): |
|
343 | 343 | ruleaction = lines[index] |
|
344 | 344 | index += 1 |
|
345 | 345 | rule = lines[index] |
|
346 | 346 | index += 1 |
|
347 | 347 | rules.append((ruleaction, rule)) |
|
348 | 348 | |
|
349 | 349 | # Replacements |
|
350 | 350 | replacements = [] |
|
351 | 351 | replacementlen = int(lines[index]) |
|
352 | 352 | index += 1 |
|
353 | 353 | for i in xrange(replacementlen): |
|
354 | 354 | replacement = lines[index] |
|
355 | 355 | original = node.bin(replacement[:40]) |
|
356 | 356 | succ = [node.bin(replacement[i:i + 40]) for i in |
|
357 | 357 | range(40, len(replacement), 40)] |
|
358 | 358 | replacements.append((original, succ)) |
|
359 | 359 | index += 1 |
|
360 | 360 | |
|
361 | 361 | backupfile = lines[index] |
|
362 | 362 | index += 1 |
|
363 | 363 | |
|
364 | 364 | fp.close() |
|
365 | 365 | |
|
366 | 366 | return parentctxnode, rules, keep, topmost, replacements, backupfile |
|
367 | 367 | |
|
368 | 368 | def clear(self): |
|
369 | 369 | if self.inprogress(): |
|
370 | 370 | self.repo.vfs.unlink('histedit-state') |
|
371 | 371 | |
|
372 | 372 | def inprogress(self): |
|
373 | 373 | return self.repo.vfs.exists('histedit-state') |
|
374 | 374 | |
|
375 | 375 | |
|
376 | 376 | class histeditaction(object): |
|
377 | 377 | def __init__(self, state, node): |
|
378 | 378 | self.state = state |
|
379 | 379 | self.repo = state.repo |
|
380 | 380 | self.node = node |
|
381 | 381 | |
|
382 | 382 | @classmethod |
|
383 | 383 | def fromrule(cls, state, rule): |
|
384 | 384 | """Parses the given rule, returning an instance of the histeditaction. |
|
385 | 385 | """ |
|
386 | 386 | rulehash = rule.strip().split(' ', 1)[0] |
|
387 | 387 | try: |
|
388 | 388 | rev = node.bin(rulehash) |
|
389 | 389 | except TypeError: |
|
390 | 390 | raise error.ParseError("invalid changeset %s" % rulehash) |
|
391 | 391 | return cls(state, rev) |
|
392 | 392 | |
|
393 | 393 | def verify(self, prev, expected, seen): |
|
394 | 394 | """ Verifies semantic correctness of the rule""" |
|
395 | 395 | repo = self.repo |
|
396 | 396 | ha = node.hex(self.node) |
|
397 | 397 | try: |
|
398 | 398 | self.node = repo[ha].node() |
|
399 | 399 | except error.RepoError: |
|
400 | 400 | raise error.ParseError(_('unknown changeset %s listed') |
|
401 | 401 | % ha[:12]) |
|
402 | 402 | if self.node is not None: |
|
403 | 403 | self._verifynodeconstraints(prev, expected, seen) |
|
404 | 404 | |
|
405 | 405 | def _verifynodeconstraints(self, prev, expected, seen): |
|
406 | 406 | # by default commands need a node in the edited list
|
407 | 407 | if self.node not in expected: |
|
408 | 408 | raise error.ParseError(_('%s "%s" changeset was not a candidate') |
|
409 | 409 | % (self.verb, node.short(self.node)), |
|
410 | 410 | hint=_('only use listed changesets')) |
|
411 | 411 | # and only one command per node |
|
412 | 412 | if self.node in seen: |
|
413 | 413 | raise error.ParseError(_('duplicated command for changeset %s') % |
|
414 | 414 | node.short(self.node)) |
|
415 | 415 | |
|
416 | 416 | def torule(self): |
|
417 | 417 | """build a histedit rule line for an action |
|
418 | 418 | |
|
419 | 419 | by default lines are in the form: |
|
420 | 420 | <hash> <rev> <summary> |
|
421 | 421 | """ |
|
422 | 422 | ctx = self.repo[self.node] |
|
423 | 423 | summary = _getsummary(ctx) |
|
424 | 424 | line = '%s %s %d %s' % (self.verb, ctx, ctx.rev(), summary) |
|
425 | 425 | # trim to 75 columns by default so it's not stupidly wide in my editor |
|
426 | 426 | # (the 5 more are left for verb) |
|
427 | 427 | maxlen = self.repo.ui.configint('histedit', 'linelen', default=80) |
|
428 | 428 | maxlen = max(maxlen, 22) # avoid truncating hash |
|
429 | 429 | return util.ellipsis(line, maxlen) |
|
430 | 430 | |
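# Illustrative sketch (editorial addition, not part of the original source):
# for a changeset with short hash 252a1af424ad, revision number 2 and summary
# "Blorb a morgwazzle" (borrowed from the command docstring below), the default
# torule() above would produce a line such as:
#
#   pick 252a1af424ad 2 Blorb a morgwazzle
#
# truncated with util.ellipsis() if it exceeds the histedit.linelen limit.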
|
431 | 431 | def tostate(self): |
|
432 | 432 | """Print an action in format used by histedit state files |
|
433 | 433 | (the first line is a verb, the remainder is the second) |
|
434 | 434 | """ |
|
435 | 435 | return "%s\n%s" % (self.verb, node.hex(self.node)) |
|
436 | 436 | |
|
437 | 437 | def run(self): |
|
438 | 438 | """Runs the action. The default behavior is simply apply the action's |
|
439 | 439 | rulectx onto the current parentctx.""" |
|
440 | 440 | self.applychange() |
|
441 | 441 | self.continuedirty() |
|
442 | 442 | return self.continueclean() |
|
443 | 443 | |
|
444 | 444 | def applychange(self): |
|
445 | 445 | """Applies the changes from this action's rulectx onto the current |
|
446 | 446 | parentctx, but does not commit them.""" |
|
447 | 447 | repo = self.repo |
|
448 | 448 | rulectx = repo[self.node] |
|
449 | 449 | repo.ui.pushbuffer(error=True, labeled=True) |
|
450 | 450 | hg.update(repo, self.state.parentctxnode, quietempty=True) |
|
451 | 451 | stats = applychanges(repo.ui, repo, rulectx, {}) |
|
452 | 452 | if stats and stats[3] > 0: |
|
453 | 453 | buf = repo.ui.popbuffer() |
|
454 | 454 | repo.ui.write(*buf) |
|
455 | 455 | raise error.InterventionRequired( |
|
456 | 456 | _('Fix up the change (%s %s)') % |
|
457 | 457 | (self.verb, node.short(self.node)), |
|
458 | 458 | hint=_('hg histedit --continue to resume')) |
|
459 | 459 | else: |
|
460 | 460 | repo.ui.popbuffer() |
|
461 | 461 | |
|
462 | 462 | def continuedirty(self): |
|
463 | 463 | """Continues the action when changes have been applied to the working |
|
464 | 464 | copy. The default behavior is to commit the dirty changes.""" |
|
465 | 465 | repo = self.repo |
|
466 | 466 | rulectx = repo[self.node] |
|
467 | 467 | |
|
468 | 468 | editor = self.commiteditor() |
|
469 | 469 | commit = commitfuncfor(repo, rulectx) |
|
470 | 470 | |
|
471 | 471 | commit(text=rulectx.description(), user=rulectx.user(), |
|
472 | 472 | date=rulectx.date(), extra=rulectx.extra(), editor=editor) |
|
473 | 473 | |
|
474 | 474 | def commiteditor(self): |
|
475 | 475 | """The editor to be used to edit the commit message.""" |
|
476 | 476 | return False |
|
477 | 477 | |
|
478 | 478 | def continueclean(self): |
|
479 | 479 | """Continues the action when the working copy is clean. The default |
|
480 | 480 | behavior is to accept the current commit as the new version of the |
|
481 | 481 | rulectx.""" |
|
482 | 482 | ctx = self.repo['.'] |
|
483 | 483 | if ctx.node() == self.state.parentctxnode: |
|
484 | 484 | self.repo.ui.warn(_('%s: skipping changeset (no changes)\n') % |
|
485 | 485 | node.short(self.node)) |
|
486 | 486 | return ctx, [(self.node, tuple())] |
|
487 | 487 | if ctx.node() == self.node: |
|
488 | 488 | # Nothing changed |
|
489 | 489 | return ctx, [] |
|
490 | 490 | return ctx, [(self.node, (ctx.node(),))] |
|
491 | 491 | |
|
492 | 492 | def commitfuncfor(repo, src): |
|
493 | 493 | """Build a commit function for the replacement of <src> |
|
494 | 494 | |
|
495 | 495 | This function ensures we apply the same treatment to all changesets.
|
496 | 496 | |
|
497 | 497 | - Add a 'histedit_source' entry in extra. |
|
498 | 498 | |
|
499 | 499 | Note that fold has its own separate logic because its handling is a bit
|
500 | 500 | different and not easily factored out of the fold method. |
|
501 | 501 | """ |
|
502 | 502 | phasemin = src.phase() |
|
503 | 503 | def commitfunc(**kwargs): |
|
504 | 504 | phasebackup = repo.ui.backupconfig('phases', 'new-commit') |
|
505 | 505 | try: |
|
506 | 506 | repo.ui.setconfig('phases', 'new-commit', phasemin, |
|
507 | 507 | 'histedit') |
|
508 | 508 | extra = kwargs.get('extra', {}).copy() |
|
509 | 509 | extra['histedit_source'] = src.hex() |
|
510 | 510 | kwargs['extra'] = extra |
|
511 | 511 | return repo.commit(**kwargs) |
|
512 | 512 | finally: |
|
513 | 513 | repo.ui.restoreconfig(phasebackup) |
|
514 | 514 | return commitfunc |
|
515 | 515 | |
|
516 | 516 | def applychanges(ui, repo, ctx, opts): |
|
517 | 517 | """Merge changeset from ctx (only) in the current working directory""" |
|
518 | 518 | wcpar = repo.dirstate.parents()[0] |
|
519 | 519 | if ctx.p1().node() == wcpar: |
|
520 | 520 | # edits are "in place": we do not need to do any merge,

521 | 521 | # just apply changes on the parent for editing
|
522 | 522 | cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True) |
|
523 | 523 | stats = None |
|
524 | 524 | else: |
|
525 | 525 | try: |
|
526 | 526 | # ui.forcemerge is an internal variable, do not document |
|
527 | 527 | repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), |
|
528 | 528 | 'histedit') |
|
529 | 529 | stats = mergemod.graft(repo, ctx, ctx.p1(), ['local', 'histedit']) |
|
530 | 530 | finally: |
|
531 | 531 | repo.ui.setconfig('ui', 'forcemerge', '', 'histedit') |
|
532 | 532 | return stats |
|
533 | 533 | |
|
534 | 534 | def collapse(repo, first, last, commitopts, skipprompt=False): |
|
535 | 535 | """collapse the set of revisions from first to last as new one. |
|
536 | 536 | |
|
537 | 537 | Expected commit options are: |
|
538 | 538 | - message |
|
539 | 539 | - date |
|
540 | 540 | - username |
|
541 | 541 | Commit message is edited in all cases. |
|
542 | 542 | |
|
543 | 543 | This function works in memory.""" |
|
544 | 544 | ctxs = list(repo.set('%d::%d', first, last)) |
|
545 | 545 | if not ctxs: |
|
546 | 546 | return None |
|
547 | 547 | for c in ctxs: |
|
548 | 548 | if not c.mutable(): |
|
549 | 549 | raise error.ParseError( |
|
550 | 550 | _("cannot fold into public change %s") % node.short(c.node())) |
|
551 | 551 | base = first.parents()[0] |
|
552 | 552 | |
|
553 | 553 | # commit a new version of the old changeset, including the update |
|
554 | 554 | # collect all files which might be affected |
|
555 | 555 | files = set() |
|
556 | 556 | for ctx in ctxs: |
|
557 | 557 | files.update(ctx.files()) |
|
558 | 558 | |
|
559 | 559 | # Recompute copies (avoid recording a -> b -> a) |
|
560 | 560 | copied = copies.pathcopies(base, last) |
|
561 | 561 | |
|
562 | 562 | # prune files which were reverted by the updates |
|
563 | 563 | files = [f for f in files if not cmdutil.samefile(f, last, base)] |
|
564 | 564 | # commit version of these files as defined by head |
|
565 | 565 | headmf = last.manifest() |
|
566 | 566 | def filectxfn(repo, ctx, path): |
|
567 | 567 | if path in headmf: |
|
568 | 568 | fctx = last[path] |
|
569 | 569 | flags = fctx.flags() |
|
570 | 570 | mctx = context.memfilectx(repo, |
|
571 | 571 | fctx.path(), fctx.data(), |
|
572 | 572 | islink='l' in flags, |
|
573 | 573 | isexec='x' in flags, |
|
574 | 574 | copied=copied.get(path)) |
|
575 | 575 | return mctx |
|
576 | 576 | return None |
|
577 | 577 | |
|
578 | 578 | if commitopts.get('message'): |
|
579 | 579 | message = commitopts['message'] |
|
580 | 580 | else: |
|
581 | 581 | message = first.description() |
|
582 | 582 | user = commitopts.get('user') |
|
583 | 583 | date = commitopts.get('date') |
|
584 | 584 | extra = commitopts.get('extra') |
|
585 | 585 | |
|
586 | 586 | parents = (first.p1().node(), first.p2().node()) |
|
587 | 587 | editor = None |
|
588 | 588 | if not skipprompt: |
|
589 | 589 | editor = cmdutil.getcommiteditor(edit=True, editform='histedit.fold') |
|
590 | 590 | new = context.memctx(repo, |
|
591 | 591 | parents=parents, |
|
592 | 592 | text=message, |
|
593 | 593 | files=files, |
|
594 | 594 | filectxfn=filectxfn, |
|
595 | 595 | user=user, |
|
596 | 596 | date=date, |
|
597 | 597 | extra=extra, |
|
598 | 598 | editor=editor) |
|
599 | 599 | return repo.commitctx(new) |
|
600 | 600 | |
|
601 | 601 | def _isdirtywc(repo): |
|
602 | 602 | return repo[None].dirty(missing=True) |
|
603 | 603 | |
|
604 | 604 | def abortdirty(): |
|
605 | 605 | raise error.Abort(_('working copy has pending changes'), |
|
606 | 606 | hint=_('amend, commit, or revert them and run histedit ' |
|
607 | 607 | '--continue, or abort with histedit --abort')) |
|
608 | 608 | |
|
609 | 609 | def action(verbs, message, priority=False, internal=False): |
|
610 | 610 | def wrap(cls): |
|
611 | 611 | assert not priority or not internal |
|
612 | 612 | verb = verbs[0] |
|
613 | 613 | if priority: |
|
614 | 614 | primaryactions.add(verb) |
|
615 | 615 | elif internal: |
|
616 | 616 | internalactions.add(verb) |
|
617 | 617 | elif len(verbs) > 1: |
|
618 | 618 | secondaryactions.add(verb) |
|
619 | 619 | else: |
|
620 | 620 | tertiaryactions.add(verb) |
|
621 | 621 | |
|
622 | 622 | cls.verb = verb |
|
623 | 623 | cls.verbs = verbs |
|
624 | 624 | cls.message = message |
|
625 | 625 | for verb in verbs: |
|
626 | 626 | actiontable[verb] = cls |
|
627 | 627 | return cls |
|
628 | 628 | return wrap |
|
629 | 629 | |
|
630 | 630 | @action(['pick', 'p'], |
|
631 | 631 | _('use commit'), |
|
632 | 632 | priority=True) |
|
633 | 633 | class pick(histeditaction): |
|
634 | 634 | def run(self): |
|
635 | 635 | rulectx = self.repo[self.node] |
|
636 | 636 | if rulectx.parents()[0].node() == self.state.parentctxnode: |
|
637 | 637 | self.repo.ui.debug('node %s unchanged\n' % node.short(self.node)) |
|
638 | 638 | return rulectx, [] |
|
639 | 639 | |
|
640 | 640 | return super(pick, self).run() |
|
641 | 641 | |
|
642 | 642 | @action(['edit', 'e'], |
|
643 | 643 | _('use commit, but stop for amending'), |
|
644 | 644 | priority=True) |
|
645 | 645 | class edit(histeditaction): |
|
646 | 646 | def run(self): |
|
647 | 647 | repo = self.repo |
|
648 | 648 | rulectx = repo[self.node] |
|
649 | 649 | hg.update(repo, self.state.parentctxnode, quietempty=True) |
|
650 | 650 | applychanges(repo.ui, repo, rulectx, {}) |
|
651 | 651 | raise error.InterventionRequired( |
|
652 | 652 | _('Editing (%s), you may commit or record as needed now.') |
|
653 | 653 | % node.short(self.node), |
|
654 | 654 | hint=_('hg histedit --continue to resume')) |
|
655 | 655 | |
|
656 | 656 | def commiteditor(self): |
|
657 | 657 | return cmdutil.getcommiteditor(edit=True, editform='histedit.edit') |
|
658 | 658 | |
|
659 | 659 | @action(['fold', 'f'], |
|
660 | 660 | _('use commit, but combine it with the one above')) |
|
661 | 661 | class fold(histeditaction): |
|
662 | 662 | def verify(self, prev, expected, seen): |
|
663 | 663 | """ Verifies semantic correctness of the fold rule""" |
|
664 | 664 | super(fold, self).verify(prev, expected, seen) |
|
665 | 665 | repo = self.repo |
|
666 | 666 | if not prev: |
|
667 | 667 | c = repo[self.node].parents()[0] |
|
668 | 668 | elif prev.verb not in ('pick', 'base'):
|
669 | 669 | return |
|
670 | 670 | else: |
|
671 | 671 | c = repo[prev.node] |
|
672 | 672 | if not c.mutable(): |
|
673 | 673 | raise error.ParseError( |
|
674 | 674 | _("cannot fold into public change %s") % node.short(c.node())) |
|
675 | 675 | |
|
676 | 676 | |
|
677 | 677 | def continuedirty(self): |
|
678 | 678 | repo = self.repo |
|
679 | 679 | rulectx = repo[self.node] |
|
680 | 680 | |
|
681 | 681 | commit = commitfuncfor(repo, rulectx) |
|
682 | 682 | commit(text='fold-temp-revision %s' % node.short(self.node), |
|
683 | 683 | user=rulectx.user(), date=rulectx.date(), |
|
684 | 684 | extra=rulectx.extra()) |
|
685 | 685 | |
|
686 | 686 | def continueclean(self): |
|
687 | 687 | repo = self.repo |
|
688 | 688 | ctx = repo['.'] |
|
689 | 689 | rulectx = repo[self.node] |
|
690 | 690 | parentctxnode = self.state.parentctxnode |
|
691 | 691 | if ctx.node() == parentctxnode: |
|
692 | 692 | repo.ui.warn(_('%s: empty changeset\n') % |
|
693 | 693 | node.short(self.node)) |
|
694 | 694 | return ctx, [(self.node, (parentctxnode,))] |
|
695 | 695 | |
|
696 | 696 | parentctx = repo[parentctxnode] |
|
697 | 697 | newcommits = set(c.node() for c in repo.set('(%d::. - %d)', parentctx, |
|
698 | 698 | parentctx)) |
|
699 | 699 | if not newcommits: |
|
700 | 700 | repo.ui.warn(_('%s: cannot fold - working copy is not a ' |
|
701 | 701 | 'descendant of previous commit %s\n') % |
|
702 | 702 | (node.short(self.node), node.short(parentctxnode))) |
|
703 | 703 | return ctx, [(self.node, (ctx.node(),))] |
|
704 | 704 | |
|
705 | 705 | middlecommits = newcommits.copy() |
|
706 | 706 | middlecommits.discard(ctx.node()) |
|
707 | 707 | |
|
708 | 708 | return self.finishfold(repo.ui, repo, parentctx, rulectx, ctx.node(), |
|
709 | 709 | middlecommits) |
|
710 | 710 | |
|
711 | 711 | def skipprompt(self): |
|
712 | 712 | """Returns true if the rule should skip the message editor. |
|
713 | 713 | |
|
714 | 714 | For example, 'fold' wants to show an editor, but 'rollup' |
|
715 | 715 | doesn't want to. |
|
716 | 716 | """ |
|
717 | 717 | return False |
|
718 | 718 | |
|
719 | 719 | def mergedescs(self): |
|
720 | 720 | """Returns true if the rule should merge messages of multiple changes. |
|
721 | 721 | |
|
722 | 722 | This exists mainly so that 'rollup' rules can be a subclass of |
|
723 | 723 | 'fold'. |
|
724 | 724 | """ |
|
725 | 725 | return True |
|
726 | 726 | |
|
727 | 727 | def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges): |
|
728 | 728 | parent = ctx.parents()[0].node() |
|
729 | 729 | repo.ui.pushbuffer() |
|
730 | 730 | hg.update(repo, parent) |
|
731 | 731 | repo.ui.popbuffer() |
|
732 | 732 | ### prepare new commit data |
|
733 | 733 | commitopts = {} |
|
734 | 734 | commitopts['user'] = ctx.user() |
|
735 | 735 | # commit message |
|
736 | 736 | if not self.mergedescs(): |
|
737 | 737 | newmessage = ctx.description() |
|
738 | 738 | else: |
|
739 | 739 | newmessage = '\n***\n'.join( |
|
740 | 740 | [ctx.description()] + |
|
741 | 741 | [repo[r].description() for r in internalchanges] + |
|
742 | 742 | [oldctx.description()]) + '\n' |
|
743 | 743 | commitopts['message'] = newmessage |
|
744 | 744 | # date |
|
745 | 745 | commitopts['date'] = max(ctx.date(), oldctx.date()) |
|
746 | 746 | extra = ctx.extra().copy() |
|
747 | 747 | # histedit_source |
|
748 | 748 | # note: ctx is likely a temporary commit but that's the best we can do
|
749 | 749 | # here. This is sufficient to solve issue3681 anyway. |
|
750 | 750 | extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex()) |
|
751 | 751 | commitopts['extra'] = extra |
|
752 | 752 | phasebackup = repo.ui.backupconfig('phases', 'new-commit') |
|
753 | 753 | try: |
|
754 | 754 | phasemin = max(ctx.phase(), oldctx.phase()) |
|
755 | 755 | repo.ui.setconfig('phases', 'new-commit', phasemin, 'histedit') |
|
756 | 756 | n = collapse(repo, ctx, repo[newnode], commitopts, |
|
757 | 757 | skipprompt=self.skipprompt()) |
|
758 | 758 | finally: |
|
759 | 759 | repo.ui.restoreconfig(phasebackup) |
|
760 | 760 | if n is None: |
|
761 | 761 | return ctx, [] |
|
762 | 762 | repo.ui.pushbuffer() |
|
763 | 763 | hg.update(repo, n) |
|
764 | 764 | repo.ui.popbuffer() |
|
765 | 765 | replacements = [(oldctx.node(), (newnode,)), |
|
766 | 766 | (ctx.node(), (n,)), |
|
767 | 767 | (newnode, (n,)), |
|
768 | 768 | ] |
|
769 | 769 | for ich in internalchanges: |
|
770 | 770 | replacements.append((ich, (n,))) |
|
771 | 771 | return repo[n], replacements |
|
772 | 772 | |
|
773 | 773 | class base(histeditaction): |
|
774 | 774 | |
|
775 | 775 | def run(self): |
|
776 | 776 | if self.repo['.'].node() != self.node: |
|
777 | 777 | mergemod.update(self.repo, self.node, False, True) |
|
778 | 778 | # branchmerge, force) |
|
779 | 779 | return self.continueclean() |
|
780 | 780 | |
|
781 | 781 | def continuedirty(self): |
|
782 | 782 | abortdirty() |
|
783 | 783 | |
|
784 | 784 | def continueclean(self): |
|
785 | 785 | basectx = self.repo['.'] |
|
786 | 786 | return basectx, [] |
|
787 | 787 | |
|
788 | 788 | def _verifynodeconstraints(self, prev, expected, seen): |
|
789 | 789 | # base can only be used with a node not in the edited set
|
790 | 790 | if self.node in expected: |
|
791 | 791 | msg = _('%s "%s" changeset was an edited list candidate') |
|
792 | 792 | raise error.ParseError( |
|
793 | 793 | msg % (self.verb, node.short(self.node)), |
|
794 | 794 | hint=_('base must only use unlisted changesets')) |
|
795 | 795 | |
|
796 | 796 | @action(['_multifold'], |
|
797 | 797 | _( |
|
798 | 798 | """fold subclass used for when multiple folds happen in a row |
|
799 | 799 | |
|
800 | 800 | We only want to fire the editor for the folded message once when |
|
801 | 801 | (say) four changes are folded down into a single change. This is |
|
802 | 802 | similar to rollup, but we should preserve both messages so that |
|
803 | 803 | when the last fold operation runs we can show the user all the |
|
804 | 804 | commit messages in their editor. |
|
805 | 805 | """), |
|
806 | 806 | internal=True) |
|
807 | 807 | class _multifold(fold): |
|
808 | 808 | def skipprompt(self): |
|
809 | 809 | return True |
|
810 | 810 | |
|
811 | 811 | @action(["roll", "r"], |
|
812 | 812 | _("like fold, but discard this commit's description")) |
|
813 | 813 | class rollup(fold): |
|
814 | 814 | def mergedescs(self): |
|
815 | 815 | return False |
|
816 | 816 | |
|
817 | 817 | def skipprompt(self): |
|
818 | 818 | return True |
|
819 | 819 | |
|
820 | 820 | @action(["drop", "d"], |
|
821 | 821 | _('remove commit from history')) |
|
822 | 822 | class drop(histeditaction): |
|
823 | 823 | def run(self): |
|
824 | 824 | parentctx = self.repo[self.state.parentctxnode] |
|
825 | 825 | return parentctx, [(self.node, tuple())] |
|
826 | 826 | |
|
827 | 827 | @action(["mess", "m"], |
|
828 | 828 | _('edit commit message without changing commit content'), |
|
829 | 829 | priority=True) |
|
830 | 830 | class message(histeditaction): |
|
831 | 831 | def commiteditor(self): |
|
832 | 832 | return cmdutil.getcommiteditor(edit=True, editform='histedit.mess') |
|
833 | 833 | |
|
834 | 834 | def findoutgoing(ui, repo, remote=None, force=False, opts=None): |
|
835 | 835 | """utility function to find the first outgoing changeset |
|
836 | 836 | |
|
837 | 837 | Used by initialization code""" |
|
838 | 838 | if opts is None: |
|
839 | 839 | opts = {} |
|
840 | 840 | dest = ui.expandpath(remote or 'default-push', remote or 'default') |
|
841 | 841 | dest, revs = hg.parseurl(dest, None)[:2] |
|
842 | 842 | ui.status(_('comparing with %s\n') % util.hidepassword(dest)) |
|
843 | 843 | |
|
844 | 844 | revs, checkout = hg.addbranchrevs(repo, repo, revs, None) |
|
845 | 845 | other = hg.peer(repo, opts, dest) |
|
846 | 846 | |
|
847 | 847 | if revs: |
|
848 | 848 | revs = [repo.lookup(rev) for rev in revs] |
|
849 | 849 | |
|
850 | 850 | outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force) |
|
851 | 851 | if not outgoing.missing: |
|
852 | 852 | raise error.Abort(_('no outgoing ancestors')) |
|
853 | 853 | roots = list(repo.revs("roots(%ln)", outgoing.missing)) |
|
854 | 854 | if 1 < len(roots): |
|
855 | 855 | msg = _('there are ambiguous outgoing revisions') |
|
856 | 856 | hint = _("see 'hg help histedit' for more detail") |
|
857 | 857 | raise error.Abort(msg, hint=hint) |
|
858 | 858 | return repo.lookup(roots[0]) |
|
859 | 859 | |
|
860 | 860 | |
|
861 | 861 | @command('histedit', |
|
862 | 862 | [('', 'commands', '', |
|
863 | 863 | _('read history edits from the specified file'), _('FILE')), |
|
864 | 864 | ('c', 'continue', False, _('continue an edit already in progress')), |
|
865 | 865 | ('', 'edit-plan', False, _('edit remaining actions list')), |
|
866 | 866 | ('k', 'keep', False, |
|
867 | 867 | _("don't strip old nodes after edit is complete")), |
|
868 | 868 | ('', 'abort', False, _('abort an edit in progress')), |
|
869 | 869 | ('o', 'outgoing', False, _('changesets not found in destination')), |
|
870 | 870 | ('f', 'force', False, |
|
871 | 871 | _('force outgoing even for unrelated repositories')), |
|
872 | 872 | ('r', 'rev', [], _('first revision to be edited'), _('REV'))], |
|
873 | 873 | _("[OPTIONS] ([ANCESTOR] | --outgoing [URL])")) |
|
874 | 874 | def histedit(ui, repo, *freeargs, **opts): |
|
875 | 875 | """interactively edit changeset history |
|
876 | 876 | |
|
877 | 877 | This command lets you edit a linear series of changesets (up to |
|
878 | 878 | and including the working directory, which should be clean). |
|
879 | 879 | You can: |
|
880 | 880 | |
|
881 | 881 | - `pick` to [re]order a changeset |
|
882 | 882 | |
|
883 | 883 | - `drop` to omit changeset |
|
884 | 884 | |
|
885 | 885 | - `mess` to reword the changeset commit message |
|
886 | 886 | |
|
887 | 887 | - `fold` to combine it with the preceding changeset |
|
888 | 888 | |
|
889 | 889 | - `roll` like fold, but discarding this commit's description |
|
890 | 890 | |
|
891 | 891 | - `edit` to edit this changeset |
|
892 | 892 | |
|
893 | 893 | There are a number of ways to select the root changeset: |
|
894 | 894 | |
|
895 | 895 | - Specify ANCESTOR directly |
|
896 | 896 | |
|
897 | 897 | - Use --outgoing -- it will be the first linear changeset not |
|
898 | 898 | included in destination. (See :hg:`help config.paths.default-push`) |
|
899 | 899 | |
|
900 | 900 | - Otherwise, the value from the "histedit.defaultrev" config option |
|
901 | 901 | is used as a revset to select the base revision when ANCESTOR is not |
|
902 | 902 | specified. The first revision returned by the revset is used. By |
|
903 | 903 | default, this selects the editable history that is unique to the |
|
904 | 904 | ancestry of the working directory. |
|
905 | 905 | |
|
906 | 906 | .. container:: verbose |
|
907 | 907 | |
|
908 | 908 | If you use --outgoing, this command will abort if there are ambiguous |
|
909 | 909 | outgoing revisions. For example, if there are multiple branches |
|
910 | 910 | containing outgoing revisions. |
|
911 | 911 | |
|
912 | 912 | Use "min(outgoing() and ::.)" or a similar revset specification

913 | 913 | instead of --outgoing to specify the edit target revision exactly in

914 | 914 | such an ambiguous situation. See :hg:`help revsets` for details about
|
915 | 915 | selecting revisions. |
|
916 | 916 | |
|
917 | 917 | .. container:: verbose |
|
918 | 918 | |
|
919 | 919 | Examples: |
|
920 | 920 | |
|
921 | 921 | - A number of changes have been made. |
|
922 | 922 | Revision 3 is no longer needed. |
|
923 | 923 | |
|
924 | 924 | Start history editing from revision 3:: |
|
925 | 925 | |
|
926 | 926 | hg histedit -r 3 |
|
927 | 927 | |
|
928 | 928 | An editor opens, containing the list of revisions, |
|
929 | 929 | with specific actions specified:: |
|
930 | 930 | |
|
931 | 931 | pick 5339bf82f0ca 3 Zworgle the foobar |
|
932 | 932 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
933 | 933 | pick 0a9639fcda9d 5 Morgify the cromulancy |
|
934 | 934 | |
|
935 | 935 | Additional information about the possible actions |
|
936 | 936 | to take appears below the list of revisions. |
|
937 | 937 | |
|
938 | 938 | To remove revision 3 from the history, |
|
939 | 939 | its action (at the beginning of the relevant line) |
|
940 | 940 | is changed to 'drop':: |
|
941 | 941 | |
|
942 | 942 | drop 5339bf82f0ca 3 Zworgle the foobar |
|
943 | 943 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
944 | 944 | pick 0a9639fcda9d 5 Morgify the cromulancy |
|
945 | 945 | |
|
946 | 946 | - A number of changes have been made. |
|
947 | 947 | Revisions 2 and 4 need to be swapped.
|
948 | 948 | |
|
949 | 949 | Start history editing from revision 2:: |
|
950 | 950 | |
|
951 | 951 | hg histedit -r 2 |
|
952 | 952 | |
|
953 | 953 | An editor opens, containing the list of revisions, |
|
954 | 954 | with specific actions specified:: |
|
955 | 955 | |
|
956 | 956 | pick 252a1af424ad 2 Blorb a morgwazzle |
|
957 | 957 | pick 5339bf82f0ca 3 Zworgle the foobar |
|
958 | 958 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
959 | 959 | |
|
960 | 960 | To swap revisions 2 and 4, their lines are swapped
|
961 | 961 | in the editor:: |
|
962 | 962 | |
|
963 | 963 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
964 | 964 | pick 5339bf82f0ca 3 Zworgle the foobar |
|
965 | 965 | pick 252a1af424ad 2 Blorb a morgwazzle |
|
966 | 966 | |
|
967 | 967 | Returns 0 on success, 1 if user intervention is required (not only |
|
968 | 968 | for intentional "edit" command, but also for resolving unexpected |
|
969 | 969 | conflicts). |
|
970 | 970 | """ |
|
971 | 971 | state = histeditstate(repo) |
|
972 | 972 | try: |
|
973 | 973 | state.wlock = repo.wlock() |
|
974 | 974 | state.lock = repo.lock() |
|
975 | 975 | _histedit(ui, repo, state, *freeargs, **opts) |
|
976 | 976 | finally: |
|
977 | 977 | release(state.lock, state.wlock) |
|
978 | 978 | |
|
979 | 979 | goalcontinue = 'continue' |
|
980 | 980 | goalabort = 'abort' |
|
981 | 981 | goaleditplan = 'edit-plan' |
|
982 | 982 | goalnew = 'new' |
|
983 | 983 | |
|
984 | 984 | def _getgoal(opts): |
|
985 | 985 | if opts.get('continue'): |
|
986 | 986 | return goalcontinue |
|
987 | 987 | if opts.get('abort'): |
|
988 | 988 | return goalabort |
|
989 | 989 | if opts.get('edit_plan'): |
|
990 | 990 | return goaleditplan |
|
991 | 991 | return goalnew |
|
992 | 992 | |
|
993 | 993 | def _readfile(ui, path): |
|
994 | 994 | if path == '-': |
|
995 | 995 | return ui.fin.read() |
|
996 | 996 | else: |
|
997 | 997 | with open(path, 'rb') as f: |
|
998 | 998 | return f.read() |
|
999 | 999 | |
|
1000 | 1000 | def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs): |
|
1001 | 1001 | # TODO only abort if we try to histedit mq patches, not just |
|
1002 | 1002 | # blanket if mq patches are applied somewhere |
|
1003 | 1003 | mq = getattr(repo, 'mq', None) |
|
1004 | 1004 | if mq and mq.applied: |
|
1005 | 1005 | raise error.Abort(_('source has mq patches applied')) |
|
1006 | 1006 | |
|
1007 | 1007 | # basic argument incompatibility processing |
|
1008 | 1008 | outg = opts.get('outgoing') |
|
1009 | 1009 | editplan = opts.get('edit_plan') |
|
1010 | 1010 | abort = opts.get('abort') |
|
1011 | 1011 | force = opts.get('force') |
|
1012 | 1012 | if force and not outg: |
|
1013 | 1013 | raise error.Abort(_('--force only allowed with --outgoing')) |
|
1014 | 1014 | if goal == 'continue': |
|
1015 | 1015 | if any((outg, abort, revs, freeargs, rules, editplan)): |
|
1016 | 1016 | raise error.Abort(_('no arguments allowed with --continue')) |
|
1017 | 1017 | elif goal == 'abort': |
|
1018 | 1018 | if any((outg, revs, freeargs, rules, editplan)): |
|
1019 | 1019 | raise error.Abort(_('no arguments allowed with --abort')) |
|
1020 | 1020 | elif goal == 'edit-plan': |
|
1021 | 1021 | if any((outg, revs, freeargs)): |
|
1022 | 1022 | raise error.Abort(_('only --commands argument allowed with ' |
|
1023 | 1023 | '--edit-plan')) |
|
1024 | 1024 | else: |
|
1025 | 1025 | if os.path.exists(os.path.join(repo.path, 'histedit-state')): |
|
1026 | 1026 | raise error.Abort(_('history edit already in progress, try ' |
|
1027 | 1027 | '--continue or --abort')) |
|
1028 | 1028 | if outg: |
|
1029 | 1029 | if revs: |
|
1030 | 1030 | raise error.Abort(_('no revisions allowed with --outgoing')) |
|
1031 | 1031 | if len(freeargs) > 1: |
|
1032 | 1032 | raise error.Abort( |
|
1033 | 1033 | _('only one repo argument allowed with --outgoing')) |
|
1034 | 1034 | else: |
|
1035 | 1035 | revs.extend(freeargs) |
|
1036 | 1036 | if len(revs) == 0: |
|
1037 | 1037 | defaultrev = destutil.desthistedit(ui, repo) |
|
1038 | 1038 | if defaultrev is not None: |
|
1039 | 1039 | revs.append(defaultrev) |
|
1040 | 1040 | |
|
1041 | 1041 | if len(revs) != 1: |
|
1042 | 1042 | raise error.Abort( |
|
1043 | 1043 | _('histedit requires exactly one ancestor revision')) |
|
1044 | 1044 | |
|
1045 | 1045 | def _histedit(ui, repo, state, *freeargs, **opts): |
|
1046 | 1046 | goal = _getgoal(opts) |
|
1047 | 1047 | revs = opts.get('rev', []) |
|
1048 | 1048 | rules = opts.get('commands', '') |
|
1049 | 1049 | state.keep = opts.get('keep', False) |
|
1050 | 1050 | |
|
1051 | 1051 | _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs) |
|
1052 | 1052 | |
|
1053 | 1053 | # rebuild state |
|
1054 | 1054 | if goal == goalcontinue: |
|
1055 | 1055 | state.read() |
|
1056 | 1056 | state = bootstrapcontinue(ui, state, opts) |
|
1057 | 1057 | elif goal == goaleditplan: |
|
1058 | 1058 | _edithisteditplan(ui, repo, state, rules) |
|
1059 | 1059 | return |
|
1060 | 1060 | elif goal == goalabort: |
|
1061 | 1061 | _aborthistedit(ui, repo, state) |
|
1062 | 1062 | return |
|
1063 | 1063 | else: |
|
1064 | 1064 | # goal == goalnew |
|
1065 | 1065 | _newhistedit(ui, repo, state, revs, freeargs, opts) |
|
1066 | 1066 | |
|
1067 | 1067 | _continuehistedit(ui, repo, state) |
|
1068 | 1068 | _finishhistedit(ui, repo, state) |
|
1069 | 1069 | |
|
1070 | 1070 | def _continuehistedit(ui, repo, state): |
|
1071 | 1071 | """This function runs after either: |
|
1072 | 1072 | - bootstrapcontinue (if the goal is 'continue') |
|
1073 | 1073 | - _newhistedit (if the goal is 'new') |
|
1074 | 1074 | """ |
|
1075 | 1075 | # preprocess rules so that we can hide inner folds from the user |
|
1076 | 1076 | # and only show one editor |
|
1077 | 1077 | actions = state.actions[:] |
|
1078 | 1078 | for idx, (action, nextact) in enumerate( |
|
1079 | 1079 | zip(actions, actions[1:] + [None])): |
|
1080 | 1080 | if action.verb == 'fold' and nextact and nextact.verb == 'fold': |
|
1081 | 1081 | state.actions[idx].__class__ = _multifold |
|
1082 | 1082 | |
|
1083 | 1083 | total = len(state.actions) |
|
1084 | 1084 | pos = 0 |
|
1085 | 1085 | while state.actions: |
|
1086 | 1086 | state.write() |
|
1087 | 1087 | actobj = state.actions.pop(0) |
|
1088 | 1088 | pos += 1 |
|
1089 | 1089 | ui.progress(_("editing"), pos, actobj.torule(), |
|
1090 | 1090 | _('changes'), total) |
|
1091 | 1091 | ui.debug('histedit: processing %s %s\n' % (actobj.verb,
|
1092 | 1092 | actobj.torule())) |
|
1093 | 1093 | parentctx, replacement_ = actobj.run() |
|
1094 | 1094 | state.parentctxnode = parentctx.node() |
|
1095 | 1095 | state.replacements.extend(replacement_) |
|
1096 | 1096 | state.write() |
|
1097 | 1097 | ui.progress(_("editing"), None) |
|
1098 | 1098 | |
|
1099 | 1099 | def _finishhistedit(ui, repo, state): |
|
1100 | 1100 | """This action runs when histedit is finishing its session""" |
|
1101 | 1101 | repo.ui.pushbuffer() |
|
1102 | 1102 | hg.update(repo, state.parentctxnode, quietempty=True) |
|
1103 | 1103 | repo.ui.popbuffer() |
|
1104 | 1104 | |
|
1105 | 1105 | mapping, tmpnodes, created, ntm = processreplacement(state) |
|
1106 | 1106 | if mapping: |
|
1107 | 1107 | for prec, succs in mapping.iteritems(): |
|
1108 | 1108 | if not succs: |
|
1109 | 1109 | ui.debug('histedit: %s is dropped\n' % node.short(prec)) |
|
1110 | 1110 | else: |
|
1111 | 1111 | ui.debug('histedit: %s is replaced by %s\n' % ( |
|
1112 | 1112 | node.short(prec), node.short(succs[0]))) |
|
1113 | 1113 | if len(succs) > 1: |
|
1114 | 1114 | m = 'histedit: %s' |
|
1115 | 1115 | for n in succs[1:]: |
|
1116 | 1116 | ui.debug(m % node.short(n)) |
|
1117 | 1117 | |
|
1118 | 1118 | supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt) |
|
1119 | 1119 | if supportsmarkers: |
|
1120 | 1120 | # Only create markers if the temp nodes weren't already removed. |
|
1121 | 1121 | obsolete.createmarkers(repo, ((repo[t],()) for t in sorted(tmpnodes) |
|
1122 | 1122 | if t in repo)) |
|
1123 | 1123 | else: |
|
1124 | 1124 | cleanupnode(ui, repo, 'temp', tmpnodes) |
|
1125 | 1125 | |
|
1126 | 1126 | if not state.keep: |
|
1127 | 1127 | if mapping: |
|
1128 | 1128 | movebookmarks(ui, repo, mapping, state.topmost, ntm) |
|
1129 | 1129 | # TODO update mq state |
|
1130 | 1130 | if supportsmarkers: |
|
1131 | 1131 | markers = [] |
|
1132 | 1132 | # sort by revision number because it sounds "right"
|
1133 | 1133 | for prec in sorted(mapping, key=repo.changelog.rev): |
|
1134 | 1134 | succs = mapping[prec] |
|
1135 | 1135 | markers.append((repo[prec], |
|
1136 | 1136 | tuple(repo[s] for s in succs))) |
|
1137 | 1137 | if markers: |
|
1138 | 1138 | obsolete.createmarkers(repo, markers) |
|
1139 | 1139 | else: |
|
1140 | 1140 | cleanupnode(ui, repo, 'replaced', mapping) |
|
1141 | 1141 | |
|
1142 | 1142 | state.clear() |
|
1143 | 1143 | if os.path.exists(repo.sjoin('undo')): |
|
1144 | 1144 | os.unlink(repo.sjoin('undo')) |
|
1145 | 1145 | if repo.vfs.exists('histedit-last-edit.txt'): |
|
1146 | 1146 | repo.vfs.unlink('histedit-last-edit.txt') |
|
1147 | 1147 | |
|
1148 | 1148 | def _aborthistedit(ui, repo, state): |
|
1149 | 1149 | try: |
|
1150 | 1150 | state.read() |
|
1151 | 1151 | __, leafs, tmpnodes, __ = processreplacement(state) |
|
1152 | 1152 | ui.debug('restore wc to old parent %s\n' |
|
1153 | 1153 | % node.short(state.topmost)) |
|
1154 | 1154 | |
|
1155 | 1155 | # Recover our old commits if necessary |
|
1156 | 1156 | if state.topmost not in repo and state.backupfile:
|
1157 | 1157 | backupfile = repo.join(state.backupfile) |
|
1158 | 1158 | f = hg.openpath(ui, backupfile) |
|
1159 | 1159 | gen = exchange.readbundle(ui, f, backupfile) |
|
1160 | 1160 | with repo.transaction('histedit.abort') as tr: |
|
1161 | 1161 | if not isinstance(gen, bundle2.unbundle20): |
|
1162 | 1162 | gen.apply(repo, 'histedit', 'bundle:' + backupfile) |
|
1163 | 1163 | if isinstance(gen, bundle2.unbundle20): |
|
1164 | 1164 | bundle2.applybundle(repo, gen, tr, |
|
1165 | 1165 | source='histedit', |
|
1166 | 1166 | url='bundle:' + backupfile) |
|
1167 | 1167 | |
|
1168 | 1168 | os.remove(backupfile) |
|
1169 | 1169 | |
|
1170 | 1170 | # check whether we should update away |
|
1171 | 1171 | if repo.unfiltered().revs('parents() and (%n or %ln::)', |
|
1172 | 1172 | state.parentctxnode, leafs | tmpnodes): |
|
1173 | 1173 | hg.clean(repo, state.topmost, show_stats=True, quietempty=True) |
|
1174 | 1174 | cleanupnode(ui, repo, 'created', tmpnodes) |
|
1175 | 1175 | cleanupnode(ui, repo, 'temp', leafs) |
|
1176 | 1176 | except Exception: |
|
1177 | 1177 | if state.inprogress(): |
|
1178 | 1178 | ui.warn(_('warning: encountered an exception during histedit ' |
|
1179 | 1179 | '--abort; the repository may not have been completely ' |
|
1180 | 1180 | 'cleaned up\n')) |
|
1181 | 1181 | raise |
|
1182 | 1182 | finally: |
|
1183 | 1183 | state.clear() |
|
1184 | 1184 | |
|
1185 | 1185 | def _edithisteditplan(ui, repo, state, rules): |
|
1186 | 1186 | state.read() |
|
1187 | 1187 | if not rules: |
|
1188 | 1188 | comment = geteditcomment(ui, |
|
1189 | 1189 | node.short(state.parentctxnode), |
|
1190 | 1190 | node.short(state.topmost)) |
|
1191 | 1191 | rules = ruleeditor(repo, ui, state.actions, comment) |
|
1192 | 1192 | else: |
|
1193 | 1193 | rules = _readfile(ui, rules) |
|
1194 | 1194 | actions = parserules(rules, state) |
|
1195 | 1195 | ctxs = [repo[act.node]
|
1196 | 1196 | for act in state.actions if act.node] |
|
1197 | 1197 | warnverifyactions(ui, repo, actions, state, ctxs) |
|
1198 | 1198 | state.actions = actions |
|
1199 | 1199 | state.write() |
|
1200 | 1200 | |
|
1201 | 1201 | def _newhistedit(ui, repo, state, revs, freeargs, opts): |
|
1202 | 1202 | outg = opts.get('outgoing') |
|
1203 | 1203 | rules = opts.get('commands', '') |
|
1204 | 1204 | force = opts.get('force') |
|
1205 | 1205 | |
|
1206 | 1206 | cmdutil.checkunfinished(repo) |
|
1207 | 1207 | cmdutil.bailifchanged(repo) |
|
1208 | 1208 | |
|
1209 | 1209 | topmost, empty = repo.dirstate.parents() |
|
1210 | 1210 | if outg: |
|
1211 | 1211 | if freeargs: |
|
1212 | 1212 | remote = freeargs[0] |
|
1213 | 1213 | else: |
|
1214 | 1214 | remote = None |
|
1215 | 1215 | root = findoutgoing(ui, repo, remote, force, opts) |
|
1216 | 1216 | else: |
|
1217 | 1217 | rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs))) |
|
1218 | 1218 | if len(rr) != 1: |
|
1219 | 1219 | raise error.Abort(_('The specified revisions must have ' |
|
1220 | 1220 | 'exactly one common root')) |
|
1221 | 1221 | root = rr[0].node() |
|
1222 | 1222 | |
|
1223 | 1223 | revs = between(repo, root, topmost, state.keep) |
|
1224 | 1224 | if not revs: |
|
1225 | 1225 | raise error.Abort(_('%s is not an ancestor of working directory') % |
|
1226 | 1226 | node.short(root)) |
|
1227 | 1227 | |
|
1228 | 1228 | ctxs = [repo[r] for r in revs] |
|
1229 | 1229 | if not rules: |
|
1230 | 1230 | comment = geteditcomment(ui, node.short(root), node.short(topmost)) |
|
1231 | 1231 | actions = [pick(state, r) for r in revs] |
|
1232 | 1232 | rules = ruleeditor(repo, ui, actions, comment) |
|
1233 | 1233 | else: |
|
1234 | 1234 | rules = _readfile(ui, rules) |
|
1235 | 1235 | actions = parserules(rules, state) |
|
1236 | 1236 | warnverifyactions(ui, repo, actions, state, ctxs) |
|
1237 | 1237 | |
|
1238 | 1238 | parentctxnode = repo[root].parents()[0].node() |
|
1239 | 1239 | |
|
1240 | 1240 | state.parentctxnode = parentctxnode |
|
1241 | 1241 | state.actions = actions |
|
1242 | 1242 | state.topmost = topmost |
|
1243 | 1243 | state.replacements = [] |
|
1244 | 1244 | |
|
1245 | 1245 | # Create a backup so we can always abort completely. |
|
1246 | 1246 | backupfile = None |
|
1247 | 1247 | if not obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
1248 | 1248 | backupfile = repair._bundle(repo, [parentctxnode], [topmost], root, |
|
1249 | 1249 | 'histedit') |
|
1250 | 1250 | state.backupfile = backupfile |
|
1251 | 1251 | |
|
1252 | 1252 | def _getsummary(ctx): |
|
1253 | 1253 | # a common pattern is to extract the summary but default to the empty |
|
1254 | 1254 | # string |
|
1255 | 1255 | summary = ctx.description() or '' |
|
1256 | 1256 | if summary: |
|
1257 | 1257 | summary = summary.splitlines()[0] |
|
1258 | 1258 | return summary |
|
1259 | 1259 | |
|
1260 | 1260 | def bootstrapcontinue(ui, state, opts): |
|
1261 | 1261 | repo = state.repo |
|
1262 | 1262 | if state.actions: |
|
1263 | 1263 | actobj = state.actions.pop(0) |
|
1264 | 1264 | |
|
1265 | 1265 | if _isdirtywc(repo): |
|
1266 | 1266 | actobj.continuedirty() |
|
1267 | 1267 | if _isdirtywc(repo): |
|
1268 | 1268 | abortdirty() |
|
1269 | 1269 | |
|
1270 | 1270 | parentctx, replacements = actobj.continueclean() |
|
1271 | 1271 | |
|
1272 | 1272 | state.parentctxnode = parentctx.node() |
|
1273 | 1273 | state.replacements.extend(replacements) |
|
1274 | 1274 | |
|
1275 | 1275 | return state |
|
1276 | 1276 | |
|
1277 | 1277 | def between(repo, old, new, keep): |
|
1278 | 1278 | """select and validate the set of revision to edit |
|
1279 | 1279 | |
|
1280 | 1280 | When keep is false, the specified set can't have children.""" |
|
1281 | 1281 | ctxs = list(repo.set('%n::%n', old, new)) |
|
1282 | 1282 | if ctxs and not keep: |
|
1283 | 1283 | if (not obsolete.isenabled(repo, obsolete.allowunstableopt) and |
|
1284 | 1284 | repo.revs('(%ld::) - (%ld)', ctxs, ctxs)): |
|
1285 | 1285 | raise error.Abort(_('can only histedit a changeset together ' |
|
1286 | 1286 | 'with all its descendants')) |
|
1287 | 1287 | if repo.revs('(%ld) and merge()', ctxs): |
|
1288 | 1288 | raise error.Abort(_('cannot edit history that contains merges')) |
|
1289 | 1289 | root = ctxs[0] # list is already sorted by repo.set |
|
1290 | 1290 | if not root.mutable(): |
|
1291 | 1291 | raise error.Abort(_('cannot edit public changeset: %s') % root, |
|
1292 | 1292 | hint=_("see 'hg help phases' for details")) |
|
1293 | 1293 | return [c.node() for c in ctxs] |
|
1294 | 1294 | |
|
1295 | 1295 | def ruleeditor(repo, ui, actions, editcomment=""): |
|
1296 | 1296 | """open an editor to edit rules |
|
1297 | 1297 | |
|
1298 | 1298 | rules are in the format [ [act, ctx], ...] like in state.rules |
|
1299 | 1299 | """ |
|
1300 | 1300 | if repo.ui.configbool("experimental", "histedit.autoverb"): |
|
1301 | 1301 | newact = util.sortdict() |
|
1302 | 1302 | for act in actions: |
|
1303 | 1303 | ctx = repo[act.node] |
|
1304 | 1304 | summary = _getsummary(ctx) |
|
1305 | 1305 | fword = summary.split(' ', 1)[0].lower() |
|
1306 | 1306 | added = False |
|
1307 | 1307 | |
|
1308 | 1308 | # if it doesn't end with the special character '!' just skip this |
|
1309 | 1309 | if fword.endswith('!'): |
|
1310 | 1310 | fword = fword[:-1] |
|
1311 | 1311 | if fword in primaryactions | secondaryactions | tertiaryactions: |
|
1312 | 1312 | act.verb = fword |
|
1313 | 1313 | # get the target summary |
|
1314 | 1314 | tsum = summary[len(fword) + 1:].lstrip() |
|
1315 | 1315 | # safe but slow: reverse iterate over the actions so we |
|
1316 | 1316 | # don't clash on two commits having the same summary |
|
1317 | 1317 | for na, l in reversed(list(newact.iteritems())): |
|
1318 | 1318 | actx = repo[na.node] |
|
1319 | 1319 | asum = _getsummary(actx) |
|
1320 | 1320 | if asum == tsum: |
|
1321 | 1321 | added = True |
|
1322 | 1322 | l.append(act) |
|
1323 | 1323 | break |
|
1324 | 1324 | |
|
1325 | 1325 | if not added: |
|
1326 | 1326 | newact[act] = [] |
|
1327 | 1327 | |
|
1328 | 1328 | # copy over and flatten the new list |
|
1329 | 1329 | actions = [] |
|
1330 | 1330 | for na, l in newact.iteritems(): |
|
1331 | 1331 | actions.append(na) |
|
1332 | 1332 | actions += l |
|
1333 | 1333 | |
|
1334 | 1334 | rules = '\n'.join([act.torule() for act in actions]) |
|
1335 | 1335 | rules += '\n\n' |
|
1336 | 1336 | rules += editcomment |
|
1337 | 1337 | rules = ui.edit(rules, ui.username(), {'prefix': 'histedit'}) |
|
1338 | 1338 | |
|
1339 | 1339 | # Save edit rules in .hg/histedit-last-edit.txt in case |
|
1340 | 1340 | # the user needs to ask for help after something |
|
1341 | 1341 | # surprising happens. |
|
1342 | 1342 | f = open(repo.join('histedit-last-edit.txt'), 'w') |
|
1343 | 1343 | f.write(rules) |
|
1344 | 1344 | f.close() |
|
1345 | 1345 | |
|
1346 | 1346 | return rules |
|
1347 | 1347 | |
|
1348 | 1348 | def parserules(rules, state): |
|
1349 | 1349 | """Read the histedit rules string and return list of action objects """ |
|
1350 | 1350 | rules = [l for l in (r.strip() for r in rules.splitlines()) |
|
1351 | 1351 | if l and not l.startswith('#')] |
|
1352 | 1352 | actions = [] |
|
1353 | 1353 | for r in rules: |
|
1354 | 1354 | if ' ' not in r: |
|
1355 | 1355 | raise error.ParseError(_('malformed line "%s"') % r) |
|
1356 | 1356 | verb, rest = r.split(' ', 1) |
|
1357 | 1357 | |
|
1358 | 1358 | if verb not in actiontable: |
|
1359 | 1359 | raise error.ParseError(_('unknown action "%s"') % verb) |
|
1360 | 1360 | |
|
1361 | 1361 | action = actiontable[verb].fromrule(state, rest) |
|
1362 | 1362 | actions.append(action) |
|
1363 | 1363 | return actions |
|
1364 | 1364 | |
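# Illustrative sketch (editorial addition, not part of the original source):
# parserules() above consumes the text saved from the rule editor, e.g.:
#
#   pick 252a1af424ad 2 Blorb a morgwazzle
#   roll 5339bf82f0ca 3 Zworgle the foobar
#   # Edit history between 252a1af424ad and 8ef592ce7cc4
#
# Blank lines and '#' comment lines are dropped; every other line is split
# into a verb ('pick', 'roll', ...) and the remainder, which the matching
# action class resolves to a changeset via fromrule().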
|
1365 | 1365 | def warnverifyactions(ui, repo, actions, state, ctxs): |
|
1366 | 1366 | try: |
|
1367 | 1367 | verifyactions(actions, state, ctxs) |
|
1368 | 1368 | except error.ParseError: |
|
1369 | 1369 | if repo.vfs.exists('histedit-last-edit.txt'): |
|
1370 | 1370 | ui.warn(_('warning: histedit rules saved ' |
|
1371 | 1371 | 'to: .hg/histedit-last-edit.txt\n')) |
|
1372 | 1372 | raise |
|
1373 | 1373 | |
|
1374 | 1374 | def verifyactions(actions, state, ctxs): |
|
1375 | 1375 | """Verify that there exists exactly one action per given changeset and |
|
1376 | 1376 | other constraints. |
|
1377 | 1377 | |
|
1378 | 1378 | Will abort if there are too many or too few rules, a malformed rule,
|
1379 | 1379 | or a rule on a changeset outside of the user-given range. |
|
1380 | 1380 | """ |
|
1381 | 1381 | expected = set(c.node() for c in ctxs) |
|
1382 | 1382 | seen = set() |
|
1383 | 1383 | prev = None |
|
1384 | 1384 | for action in actions: |
|
1385 | 1385 | action.verify(prev, expected, seen) |
|
1386 | 1386 | prev = action |
|
1387 | 1387 | if action.node is not None: |
|
1388 | 1388 | seen.add(action.node) |
|
1389 | 1389 | missing = sorted(expected - seen) # sort to stabilize output |
|
1390 | 1390 | |
|
1391 | 1391 | if state.repo.ui.configbool('histedit', 'dropmissing'): |
|
1392 | 1392 | if len(actions) == 0: |
|
1393 | 1393 | raise error.ParseError(_('no rules provided'), |
|
1394 | 1394 | hint=_('use strip extension to remove commits')) |
|
1395 | 1395 | |
|
1396 | 1396 | drops = [drop(state, n) for n in missing] |
|
1397 | 1397 | # put them at the beginning so they execute immediately and
|
1398 | 1398 | # don't show in the edit-plan in the future |
|
1399 | 1399 | actions[:0] = drops |
|
1400 | 1400 | elif missing: |
|
1401 | 1401 | raise error.ParseError(_('missing rules for changeset %s') % |
|
1402 | 1402 | node.short(missing[0]), |
|
1403 | 1403 | hint=_('use "drop %s" to discard, see also: ' |
|
1404 | 1404 | "'hg help -e histedit.config'") |
|
1405 | 1405 | % node.short(missing[0])) |
|
1406 | 1406 | |
|
1407 | 1407 | def adjustreplacementsfrommarkers(repo, oldreplacements): |
|
1408 |      | """Adjust replacements from obsolescen

     | 1408 | """Adjust replacements from obsolescence markers
|
1409 | 1409 | |
|
1410 | 1410 | Replacements structure is originally generated based on |
|
1411 | 1411 | histedit's state and does not account for changes that are |
|
1412 | 1412 | not recorded there. This function fixes that by adding |
|
1413 |      | data read from obsolescen

     | 1413 | data read from obsolescence markers"""
|
1414 | 1414 | if not obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
1415 | 1415 | return oldreplacements |
|
1416 | 1416 | |
|
1417 | 1417 | unfi = repo.unfiltered() |
|
1418 | 1418 | nm = unfi.changelog.nodemap |
|
1419 | 1419 | obsstore = repo.obsstore |
|
1420 | 1420 | newreplacements = list(oldreplacements) |
|
1421 | 1421 | oldsuccs = [r[1] for r in oldreplacements] |
|
1422 | 1422 | # successors that have already been added to succstocheck once |
|
1423 | 1423 | seensuccs = set().union(*oldsuccs) # create a set from an iterable of tuples |
|
1424 | 1424 | succstocheck = list(seensuccs) |
|
1425 | 1425 | while succstocheck: |
|
1426 | 1426 | n = succstocheck.pop() |
|
1427 | 1427 | missing = nm.get(n) is None |
|
1428 | 1428 | markers = obsstore.successors.get(n, ()) |
|
1429 | 1429 | if missing and not markers: |
|
1430 | 1430 | # dead end, mark it as such |
|
1431 | 1431 | newreplacements.append((n, ())) |
|
1432 | 1432 | for marker in markers: |
|
1433 | 1433 | nsuccs = marker[1] |
|
1434 | 1434 | newreplacements.append((n, nsuccs)) |
|
1435 | 1435 | for nsucc in nsuccs: |
|
1436 | 1436 | if nsucc not in seensuccs: |
|
1437 | 1437 | seensuccs.add(nsucc) |
|
1438 | 1438 | succstocheck.append(nsucc) |
|
1439 | 1439 | |
|
1440 | 1440 | return newreplacements |
|
1441 | 1441 | |
|
1442 | 1442 | def processreplacement(state): |
|
1443 | 1443 | """process the list of replacements to return |
|
1444 | 1444 | |
|
1445 | 1445 | 1) the final mapping between original and created nodes |
|
1446 | 1446 | 2) the list of temporary nodes created by histedit

1447 | 1447 | 3) the list of new commits created by histedit"""
|
1448 | 1448 | replacements = adjustreplacementsfrommarkers(state.repo, state.replacements) |
|
1449 | 1449 | allsuccs = set() |
|
1450 | 1450 | replaced = set() |
|
1451 | 1451 | fullmapping = {} |
|
1452 | 1452 | # initialize basic set |
|
1453 | 1453 | # fullmapping records all operations recorded in replacement |
|
1454 | 1454 | for rep in replacements: |
|
1455 | 1455 | allsuccs.update(rep[1]) |
|
1456 | 1456 | replaced.add(rep[0]) |
|
1457 | 1457 | fullmapping.setdefault(rep[0], set()).update(rep[1]) |
|
1458 | 1458 | new = allsuccs - replaced |
|
1459 | 1459 | tmpnodes = allsuccs & replaced |
|
1460 | 1460 | # Reduce fullmapping into a direct relation between original nodes

1461 | 1461 | # and final nodes created during history editing

1462 | 1462 | # Dropped changesets are replaced by an empty list
|
1463 | 1463 | toproceed = set(fullmapping) |
|
1464 | 1464 | final = {} |
|
1465 | 1465 | while toproceed: |
|
1466 | 1466 | for x in list(toproceed): |
|
1467 | 1467 | succs = fullmapping[x] |
|
1468 | 1468 | for s in list(succs): |
|
1469 | 1469 | if s in toproceed: |
|
1470 | 1470 | # non final node with unknown closure |
|
1471 | 1471 | # We can't process this now |
|
1472 | 1472 | break |
|
1473 | 1473 | elif s in final: |
|
1474 | 1474 | # non final node, replace with closure |
|
1475 | 1475 | succs.remove(s) |
|
1476 | 1476 | succs.update(final[s]) |
|
1477 | 1477 | else: |
|
1478 | 1478 | final[x] = succs |
|
1479 | 1479 | toproceed.remove(x) |
|
1480 | 1480 | # remove tmpnodes from final mapping |
|
1481 | 1481 | for n in tmpnodes: |
|
1482 | 1482 | del final[n] |
|
1483 | 1483 | # we expect all changes involved in final to exist in the repo |
|
1484 | 1484 | # turn `final` into list (topologically sorted) |
|
1485 | 1485 | nm = state.repo.changelog.nodemap |
|
1486 | 1486 | for prec, succs in final.items(): |
|
1487 | 1487 | final[prec] = sorted(succs, key=nm.get) |
|
1488 | 1488 | |
|
1489 | 1489 | # computed topmost element (necessary for bookmark) |
|
1490 | 1490 | if new: |
|
1491 | 1491 | newtopmost = sorted(new, key=state.repo.changelog.rev)[-1] |
|
1492 | 1492 | elif not final: |
|
1493 | 1493 | # Nothing rewritten at all. We won't need `newtopmost`:

1494 | 1494 | # it is the same as `oldtopmost` and `processreplacement` knows it
|
1495 | 1495 | newtopmost = None |
|
1496 | 1496 | else: |
|
1497 | 1497 | # everybody died. The newtopmost is the parent of the root.
|
1498 | 1498 | r = state.repo.changelog.rev |
|
1499 | 1499 | newtopmost = state.repo[sorted(final, key=r)[0]].p1().node() |
|
1500 | 1500 | |
|
1501 | 1501 | return final, tmpnodes, new, newtopmost |
|
1502 | 1502 | |
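# Illustrative sketch (editorial addition, not part of the original source):
# state.replacements is a list of (oldnode, (successor, ...)) tuples. If, say,
# node A was first replaced by a temporary commit TMP which was then replaced
# by C, the input would contain [(A, (TMP,)), (TMP, (C,))]; processreplacement()
# above collapses the chain so that the final mapping is {A: [C]}, TMP ends up
# in the tmpnodes set and C in the set of newly created commits.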
|
1503 | 1503 | def movebookmarks(ui, repo, mapping, oldtopmost, newtopmost): |
|
1504 | 1504 | """Move bookmark from old to newly created node""" |
|
1505 | 1505 | if not mapping: |
|
1506 | 1506 | # if nothing got rewritten there is no purpose for this function
|
1507 | 1507 | return |
|
1508 | 1508 | moves = [] |
|
1509 | 1509 | for bk, old in sorted(repo._bookmarks.iteritems()): |
|
1510 | 1510 | if old == oldtopmost: |
|
1511 | 1511 | # special case: ensure bookmarks stay on tip.
|
1512 | 1512 | # |
|
1513 | 1513 | # This is arguably a feature and we may only want that for the |
|
1514 | 1514 | # active bookmark. But the behavior is kept compatible with the old |
|
1515 | 1515 | # version for now. |
|
1516 | 1516 | moves.append((bk, newtopmost)) |
|
1517 | 1517 | continue |
|
1518 | 1518 | base = old |
|
1519 | 1519 | new = mapping.get(base, None) |
|
1520 | 1520 | if new is None: |
|
1521 | 1521 | continue |
|
1522 | 1522 | while not new: |
|
1523 | 1523 | # base is killed, trying with parent |
|
1524 | 1524 | base = repo[base].p1().node() |
|
1525 | 1525 | new = mapping.get(base, (base,)) |
|
1526 | 1526 | # nothing to move |
|
1527 | 1527 | moves.append((bk, new[-1])) |
|
1528 | 1528 | if moves: |
|
1529 | 1529 | lock = tr = None |
|
1530 | 1530 | try: |
|
1531 | 1531 | lock = repo.lock() |
|
1532 | 1532 | tr = repo.transaction('histedit') |
|
1533 | 1533 | marks = repo._bookmarks |
|
1534 | 1534 | for mark, new in moves: |
|
1535 | 1535 | old = marks[mark] |
|
1536 | 1536 | ui.note(_('histedit: moving bookmark %s from %s to %s\n')
|
1537 | 1537 | % (mark, node.short(old), node.short(new))) |
|
1538 | 1538 | marks[mark] = new |
|
1539 | 1539 | marks.recordchange(tr) |
|
1540 | 1540 | tr.close() |
|
1541 | 1541 | finally: |
|
1542 | 1542 | release(tr, lock) |
|
1543 | 1543 | |
|
1544 | 1544 | def cleanupnode(ui, repo, name, nodes): |
|
1545 | 1545 | """strip a group of nodes from the repository |
|
1546 | 1546 | |
|
1547 | 1547 | The set of nodes to strip may contain unknown nodes."""
|
1548 | 1548 | ui.debug('should strip %s nodes %s\n' % |
|
1549 | 1549 | (name, ', '.join([node.short(n) for n in nodes]))) |
|
1550 | 1550 | with repo.lock(): |
|
1551 | 1551 | # do not let filtering get in the way of the cleanse |
|
1552 | 1552 | # we should probably get rid of obsolescence markers created during the
|
1553 | 1553 | # histedit, but we currently do not have such information. |
|
1554 | 1554 | repo = repo.unfiltered() |
|
1555 | 1555 | # Find all nodes that need to be stripped |
|
1556 | 1556 | # (we use %lr instead of %ln to silently ignore unknown items) |
|
1557 | 1557 | nm = repo.changelog.nodemap |
|
1558 | 1558 | nodes = sorted(n for n in nodes if n in nm) |
|
1559 | 1559 | roots = [c.node() for c in repo.set("roots(%ln)", nodes)] |
|
1560 | 1560 | for c in roots: |
|
1561 | 1561 | # We should process nodes in reverse order to strip the tip-most first,

1562 | 1562 | # but this triggers a bug in the changegroup hook.

1563 | 1563 | # Doing so would reduce bundle overhead.
|
1564 | 1564 | repair.strip(ui, repo, c) |
|
1565 | 1565 | |
|
1566 | 1566 | def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs): |
|
1567 | 1567 | if isinstance(nodelist, str): |
|
1568 | 1568 | nodelist = [nodelist] |
|
1569 | 1569 | if os.path.exists(os.path.join(repo.path, 'histedit-state')): |
|
1570 | 1570 | state = histeditstate(repo) |
|
1571 | 1571 | state.read() |
|
1572 | 1572 | histedit_nodes = set([action.node for action |
|
1573 | 1573 | in state.actions if action.node]) |
|
1574 | 1574 | common_nodes = histedit_nodes & set(nodelist) |
|
1575 | 1575 | if common_nodes: |
|
1576 | 1576 | raise error.Abort(_("histedit in progress, can't strip %s") |
|
1577 | 1577 | % ', '.join(node.short(x) for x in common_nodes)) |
|
1578 | 1578 | return orig(ui, repo, nodelist, *args, **kwargs) |
|
1579 | 1579 | |
|
1580 | 1580 | extensions.wrapfunction(repair, 'strip', stripwrapper) |
|
1581 | 1581 | |
|
1582 | 1582 | def summaryhook(ui, repo): |
|
1583 | 1583 | if not os.path.exists(repo.join('histedit-state')): |
|
1584 | 1584 | return |
|
1585 | 1585 | state = histeditstate(repo) |
|
1586 | 1586 | state.read() |
|
1587 | 1587 | if state.actions: |
|
1588 | 1588 | # i18n: column positioning for "hg summary" |
|
1589 | 1589 | ui.write(_('hist: %s (histedit --continue)\n') % |
|
1590 | 1590 | (ui.label(_('%d remaining'), 'histedit.remaining') % |
|
1591 | 1591 | len(state.actions))) |
|
1592 | 1592 | |
|
1593 | 1593 | def extsetup(ui): |
|
1594 | 1594 | cmdutil.summaryhooks.add('histedit', summaryhook) |
|
1595 | 1595 | cmdutil.unfinishedstates.append( |
|
1596 | 1596 | ['histedit-state', False, True, _('histedit in progress'), |
|
1597 | 1597 | _("use 'hg histedit --continue' or 'hg histedit --abort'")]) |
|
1598 | 1598 | cmdutil.afterresolvedstates.append( |
|
1599 | 1599 | ['histedit-state', _('hg histedit --continue')]) |
|
1600 | 1600 | if ui.configbool("experimental", "histeditng"): |
|
1601 | 1601 | globals()['base'] = action(['base', 'b'], |
|
1602 | 1602 | _('checkout changeset and apply further changesets from there') |
|
1603 | 1603 | )(base) |
@@ -1,129 +1,129 @@
|
1 | 1 | # logtoprocess.py - send ui.log() data to a subprocess |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """Send ui.log() data to a subprocess (EXPERIMENTAL) |
|
8 | 8 | |
|
9 | 9 | This extension lets you specify a shell command per ui.log() event, |
|
10 | 10 | sending all remaining arguments as environment variables to that command.
|
11 | 11 | |
|
12 | 12 | Each positional argument to the method results in a `MSG[N]` key in the |
|
13 | 13 | environment, starting at 1 (so `MSG1`, `MSG2`, etc.). Each keyword argument |
|
14 | 14 | is set as an `OPT_UPPERCASE_KEY` variable (so the key is uppercased, and
|
15 | 15 | prefixed with `OPT_`). The original event name is passed in the `EVENT` |
|
16 | 16 | environment variable, and the process ID of mercurial is given in `HGPID`. |
|
17 | 17 | |
|
18 | 18 | So given a call `ui.log('foo', 'bar', 'baz', spam='eggs')`, a script configured
|
19 | 19 | for the `foo` event can expect an environment with `MSG1=bar`, `MSG2=baz`, and |
|
20 | 20 | `OPT_SPAM=eggs`. |
|
21 | 21 | |
|
22 | 22 | Scripts are configured in the `[logtoprocess]` section, each key an event name. |
|
23 | 23 | For example:: |
|
24 | 24 | |
|
25 | 25 | [logtoprocess] |
|
26 | 26 | commandexception = echo "$MSG2$MSG3" > /var/log/mercurial_exceptions.log |
|
27 | 27 | |
|
28 | 28 | would log the warning message and traceback of any failed command dispatch. |
|
29 | 29 | |
|
30 | Scripts are run asychronously as detached daemon processes; mercurial will | |
|
30 | Scripts are run asynchronously as detached daemon processes; mercurial will | |
|
31 | 31 | not ensure that they exit cleanly. |
|
32 | 32 | |
|
33 | 33 | """ |
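
For readers skimming the diff, here is a small illustrative sketch of the environment layout the module docstring above describes. The helper name is hypothetical and is not part of this extension; the extension builds an equivalent mapping inside its ui.log wrapper further down in this file.

import os

def buildlogenv(event, messages, opts):
    # mirror the layout described in the module docstring above
    env = dict(os.environ)
    for i, m in enumerate(messages, 1):       # positional args -> MSG1, MSG2, ...
        env['MSG%d' % i] = str(m)
    for key, value in opts.items():           # keyword args -> OPT_<UPPERCASED KEY>
        env['OPT_%s' % key.upper()] = str(value)
    env['EVENT'] = event                      # original event name
    env['HGPID'] = str(os.getpid())           # mercurial's process id
    return env

# buildlogenv('foo', ['bar', 'baz'], {'spam': 'eggs'}) contains, besides the
# inherited variables, MSG1=bar, MSG2=baz, OPT_SPAM=eggs and EVENT=foo.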
|
34 | 34 | |
|
35 | 35 | from __future__ import absolute_import |
|
36 | 36 | |
|
37 | 37 | import itertools |
|
38 | 38 | import os |
|
39 | 39 | import platform |
|
40 | 40 | import subprocess |
|
41 | 41 | import sys |
|
42 | 42 | |
|
43 | 43 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
44 | 44 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
45 | 45 | # be specifying the version(s) of Mercurial they are tested with, or |
|
46 | 46 | # leave the attribute unspecified. |
|
47 | 47 | testedwith = 'ships-with-hg-core' |
|
48 | 48 | |
|
49 | 49 | def uisetup(ui): |
|
50 | 50 | if platform.system() == 'Windows': |
|
51 | 51 | # no fork on Windows, but we can create a detached process |
|
52 | 52 | # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx |
|
53 | 53 | # No stdlib constant exists for this value |
|
54 | 54 | DETACHED_PROCESS = 0x00000008 |
|
55 | 55 | _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP |
|
56 | 56 | |
|
57 | 57 | def runshellcommand(script, env): |
|
58 | 58 | # we can't use close_fds *and* redirect stdin. I'm not sure that we |
|
59 | 59 | # need to because the detached process has no console connection. |
|
60 | 60 | subprocess.Popen( |
|
61 | 61 | script, shell=True, env=env, close_fds=True, |
|
62 | 62 | creationflags=_creationflags) |
|
63 | 63 | else: |
|
64 | 64 | def runshellcommand(script, env): |
|
65 | 65 | # double-fork to completely detach from the parent process |
|
66 | 66 | # based on http://code.activestate.com/recipes/278731 |
|
67 | 67 | pid = os.fork() |
|
68 | 68 | if pid: |
|
69 | 69 | # parent |
|
70 | 70 | return |
|
71 | 71 | # subprocess.Popen() forks again, all we need to add is |
|
72 | 72 | # flag the new process as a new session. |
|
73 | 73 | if sys.version_info < (3, 2): |
|
74 | 74 | newsession = {'preexec_fn': os.setsid} |
|
75 | 75 | else: |
|
76 | 76 | newsession = {'start_new_session': True} |
|
77 | 77 | try: |
|
78 | 78 | # connect stdin to devnull to make sure the subprocess can't |
|
79 | 79 | # muck up that stream for mercurial. |
|
80 | 80 | subprocess.Popen( |
|
81 | 81 | script, shell=True, stdin=open(os.devnull, 'r'), env=env, |
|
82 | 82 | close_fds=True, **newsession) |
|
83 | 83 | finally: |
|
84 | 84 | # mission accomplished, this child needs to exit and not |
|
85 | 85 | # continue the hg process here. |
|
86 | 86 | os._exit(0) |
|
87 | 87 | |
|
88 | 88 | class logtoprocessui(ui.__class__): |
|
89 | 89 | def log(self, event, *msg, **opts): |
|
90 | 90 | """Map log events to external commands |
|
91 | 91 | |
|
92 | 92 | Arguments are passed on as environment variables. |
|
93 | 93 | |
|
94 | 94 | """ |
|
95 | 95 | script = self.config('logtoprocess', event) |
|
96 | 96 | if script: |
|
97 | 97 | if msg: |
|
98 | 98 | # try to format the log message given the remaining |
|
99 | 99 | # arguments |
|
100 | 100 | try: |
|
101 | 101 | # Python string formatting with % either uses a |
|
102 | 102 | # dictionary *or* tuple, but not both. If we have |
|
103 | 103 | # keyword options, assume we need a mapping. |
|
104 | 104 | formatted = msg[0] % (opts or msg[1:]) |
|
105 | 105 | except (TypeError, KeyError): |
|
106 | 106 | # Failed to apply the arguments, ignore |
|
107 | 107 | formatted = msg[0] |
|
108 | 108 | messages = (formatted,) + msg[1:] |
|
109 | 109 | else: |
|
110 | 110 | messages = msg |
|
111 | 111 | # positional arguments are listed as MSG[N] keys in the |
|
112 | 112 | # environment |
|
113 | 113 | msgpairs = ( |
|
114 | 114 | ('MSG{0:d}'.format(i), str(m)) |
|
115 | 115 | for i, m in enumerate(messages, 1)) |
|
116 | 116 | # keyword arguments get prefixed with OPT_ and uppercased |
|
117 | 117 | optpairs = ( |
|
118 | 118 | ('OPT_{0}'.format(key.upper()), str(value)) |
|
119 | 119 | for key, value in opts.iteritems()) |
|
120 | 120 | env = dict(itertools.chain(os.environ.items(), |
|
121 | 121 | msgpairs, optpairs), |
|
122 | 122 | EVENT=event, HGPID=str(os.getpid())) |
|
123 | 123 | # Connect stdin to /dev/null to prevent child processes messing |
|
124 | 124 | # with mercurial's stdin. |
|
125 | 125 | runshellcommand(script, env) |
|
126 | 126 | return super(logtoprocessui, self).log(event, *msg, **opts) |
|
127 | 127 | |
|
128 | 128 | # Replace the class for this instance and all clones created from it: |
|
129 | 129 | ui.__class__ = logtoprocessui |
@@ -1,1452 +1,1452 @@
|
1 | 1 | # rebase.py - rebasing feature for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''command to move sets of revisions to a different ancestor |
|
9 | 9 | |
|
10 | 10 | This extension lets you rebase changesets in an existing Mercurial |
|
11 | 11 | repository. |
|
12 | 12 | |
|
13 | 13 | For more information: |
|
14 | 14 | https://mercurial-scm.org/wiki/RebaseExtension |
|
15 | 15 | ''' |
|
16 | 16 | |
|
17 | 17 | from __future__ import absolute_import |
|
18 | 18 | |
|
19 | 19 | import errno |
|
20 | 20 | import os |
|
21 | 21 | |
|
22 | 22 | from mercurial.i18n import _ |
|
23 | 23 | from mercurial.node import ( |
|
24 | 24 | hex, |
|
25 | 25 | nullid, |
|
26 | 26 | nullrev, |
|
27 | 27 | short, |
|
28 | 28 | ) |
|
29 | 29 | from mercurial import ( |
|
30 | 30 | bookmarks, |
|
31 | 31 | cmdutil, |
|
32 | 32 | commands, |
|
33 | 33 | copies, |
|
34 | 34 | destutil, |
|
35 | 35 | error, |
|
36 | 36 | extensions, |
|
37 | 37 | hg, |
|
38 | 38 | lock, |
|
39 | 39 | merge as mergemod, |
|
40 | 40 | obsolete, |
|
41 | 41 | patch, |
|
42 | 42 | phases, |
|
43 | 43 | registrar, |
|
44 | 44 | repair, |
|
45 | 45 | repoview, |
|
46 | 46 | revset, |
|
47 | 47 | scmutil, |
|
48 | 48 | util, |
|
49 | 49 | ) |
|
50 | 50 | |
|
51 | 51 | release = lock.release |
|
52 | 52 | templateopts = commands.templateopts |
|
53 | 53 | |
|
54 | 54 | # The following constants are used throughout the rebase module. The ordering of |
|
55 | 55 | # their values must be maintained. |
|
56 | 56 | |
|
57 | 57 | # Indicates that a revision needs to be rebased |
|
58 | 58 | revtodo = -1 |
|
59 | 59 | nullmerge = -2 |
|
60 | 60 | revignored = -3 |
|
61 | 61 | # successor in rebase destination |
|
62 | 62 | revprecursor = -4 |
|
63 | 63 | # plain prune (no successor) |
|
64 | 64 | revpruned = -5 |
|
65 | 65 | revskipped = (revignored, revprecursor, revpruned) |
|
66 | 66 | |
|
67 | 67 | cmdtable = {} |
|
68 | 68 | command = cmdutil.command(cmdtable) |
|
69 | 69 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
70 | 70 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
71 | 71 | # be specifying the version(s) of Mercurial they are tested with, or |
|
72 | 72 | # leave the attribute unspecified. |
|
73 | 73 | testedwith = 'ships-with-hg-core' |
|
74 | 74 | |
|
75 | 75 | def _nothingtorebase(): |
|
76 | 76 | return 1 |
|
77 | 77 | |
|
78 | 78 | def _savegraft(ctx, extra): |
|
79 | 79 | s = ctx.extra().get('source', None) |
|
80 | 80 | if s is not None: |
|
81 | 81 | extra['source'] = s |
|
82 | 82 | s = ctx.extra().get('intermediate-source', None) |
|
83 | 83 | if s is not None: |
|
84 | 84 | extra['intermediate-source'] = s |
|
85 | 85 | |
|
86 | 86 | def _savebranch(ctx, extra): |
|
87 | 87 | extra['branch'] = ctx.branch() |
|
88 | 88 | |
|
89 | 89 | def _makeextrafn(copiers): |
|
90 | 90 | """make an extrafn out of the given copy-functions. |
|
91 | 91 | |
|
92 | 92 | A copy function takes a context and an extra dict, and mutates the |
|
93 | 93 | extra dict as needed based on the given context. |
|
94 | 94 | """ |
|
95 | 95 | def extrafn(ctx, extra): |
|
96 | 96 | for c in copiers: |
|
97 | 97 | c(ctx, extra) |
|
98 | 98 | return extrafn |
|
99 | 99 | |
|
100 | 100 | def _destrebase(repo, sourceset, destspace=None): |
|
101 | 101 | """small wrapper around destmerge to pass the right extra args |
|
102 | 102 | |
|
103 | 103 | Please wrap destutil.destmerge instead.""" |
|
104 | 104 | return destutil.destmerge(repo, action='rebase', sourceset=sourceset, |
|
105 | 105 | onheadcheck=False, destspace=destspace) |
|
106 | 106 | |
|
107 | 107 | revsetpredicate = registrar.revsetpredicate() |
|
108 | 108 | |
|
109 | 109 | @revsetpredicate('_destrebase') |
|
110 | 110 | def _revsetdestrebase(repo, subset, x): |
|
111 | 111 | # ``_rebasedefaultdest()`` |
|
112 | 112 | |
|
113 | 113 | # default destination for rebase. |
|
114 | 114 | # # XXX: Currently private because I expect the signature to change. |
|
115 | 115 | # # XXX: - bailing out in case of ambiguity vs returning all data. |
|
116 | 116 | # i18n: "_rebasedefaultdest" is a keyword |
|
117 | 117 | sourceset = None |
|
118 | 118 | if x is not None: |
|
119 | 119 | sourceset = revset.getset(repo, revset.fullreposet(repo), x) |
|
120 | 120 | return subset & revset.baseset([_destrebase(repo, sourceset)]) |
|
121 | 121 | |
|
122 | 122 | class rebaseruntime(object): |
|
123 | 123 | """This class is a container for rebase runtime state""" |
|
124 | 124 | def __init__(self, repo, ui, opts=None): |
|
125 | 125 | if opts is None: |
|
126 | 126 | opts = {} |
|
127 | 127 | |
|
128 | 128 | self.repo = repo |
|
129 | 129 | self.ui = ui |
|
130 | 130 | self.opts = opts |
|
131 | 131 | self.originalwd = None |
|
132 | 132 | self.external = nullrev |
|
133 | 133 | # Mapping between the old revision id and either the new rebased

134 | 134 | # revision or what needs to be done with the old revision. The state

135 | 135 | # dict contains most of the rebase progress state.
|
136 | 136 | self.state = {} |
|
137 | 137 | self.activebookmark = None |
|
138 | 138 | self.currentbookmarks = None |
|
139 | 139 | self.target = None |
|
140 | 140 | self.skipped = set() |
|
141 | 141 | self.targetancestors = set() |
|
142 | 142 | |
|
143 | 143 | self.collapsef = opts.get('collapse', False) |
|
144 | 144 | self.collapsemsg = cmdutil.logmessage(ui, opts) |
|
145 | 145 | self.date = opts.get('date', None) |
|
146 | 146 | |
|
147 | 147 | e = opts.get('extrafn') # internal, used by e.g. hgsubversion |
|
148 | 148 | self.extrafns = [_savegraft] |
|
149 | 149 | if e: |
|
150 | 150 | self.extrafns = [e] |
|
151 | 151 | |
|
152 | 152 | self.keepf = opts.get('keep', False) |
|
153 | 153 | self.keepbranchesf = opts.get('keepbranches', False) |
|
154 | 154 | # keepopen is not meant for use on the command line, but by |
|
155 | 155 | # other extensions |
|
156 | 156 | self.keepopen = opts.get('keepopen', False) |
|
157 | 157 | self.obsoletenotrebased = {} |
|
158 | 158 | |
|
159 | 159 | def restorestatus(self): |
|
160 | 160 | """Restore a previously stored status""" |
|
161 | 161 | repo = self.repo |
|
162 | 162 | keepbranches = None |
|
163 | 163 | target = None |
|
164 | 164 | collapse = False |
|
165 | 165 | external = nullrev |
|
166 | 166 | activebookmark = None |
|
167 | 167 | state = {} |
|
168 | 168 | |
|
169 | 169 | try: |
|
170 | 170 | f = repo.vfs("rebasestate") |
|
171 | 171 | for i, l in enumerate(f.read().splitlines()): |
|
172 | 172 | if i == 0: |
|
173 | 173 | originalwd = repo[l].rev() |
|
174 | 174 | elif i == 1: |
|
175 | 175 | target = repo[l].rev() |
|
176 | 176 | elif i == 2: |
|
177 | 177 | external = repo[l].rev() |
|
178 | 178 | elif i == 3: |
|
179 | 179 | collapse = bool(int(l)) |
|
180 | 180 | elif i == 4: |
|
181 | 181 | keep = bool(int(l)) |
|
182 | 182 | elif i == 5: |
|
183 | 183 | keepbranches = bool(int(l)) |
|
184 | 184 | elif i == 6 and not (len(l) == 81 and ':' in l): |
|
185 | 185 | # line 6 is a recent addition, so for backwards |
|
186 | 186 | # compatibility check that the line doesn't look like the |
|
187 | 187 | # oldrev:newrev lines |
|
188 | 188 | activebookmark = l |
|
189 | 189 | else: |
|
190 | 190 | oldrev, newrev = l.split(':') |
|
191 | 191 | if newrev in (str(nullmerge), str(revignored), |
|
192 | 192 | str(revprecursor), str(revpruned)): |
|
193 | 193 | state[repo[oldrev].rev()] = int(newrev) |
|
194 | 194 | elif newrev == nullid: |
|
195 | 195 | state[repo[oldrev].rev()] = revtodo |
|
196 | 196 | # Legacy compat special case |
|
197 | 197 | else: |
|
198 | 198 | state[repo[oldrev].rev()] = repo[newrev].rev() |
|
199 | 199 | |
|
200 | 200 | except IOError as err: |
|
201 | 201 | if err.errno != errno.ENOENT: |
|
202 | 202 | raise |
|
203 | 203 | cmdutil.wrongtooltocontinue(repo, _('rebase')) |
|
204 | 204 | |
|
205 | 205 | if keepbranches is None: |
|
206 | 206 | raise error.Abort(_('.hg/rebasestate is incomplete')) |
|
207 | 207 | |
|
208 | 208 | skipped = set() |
|
209 | 209 | # recompute the set of skipped revs |
|
210 | 210 | if not collapse: |
|
211 | 211 | seen = set([target]) |
|
212 | 212 | for old, new in sorted(state.items()): |
|
213 | 213 | if new != revtodo and new in seen: |
|
214 | 214 | skipped.add(old) |
|
215 | 215 | seen.add(new) |
|
216 | 216 | repo.ui.debug('computed skipped revs: %s\n' % |
|
217 | 217 | (' '.join(str(r) for r in sorted(skipped)) or None)) |
|
218 | 218 | repo.ui.debug('rebase status resumed\n') |
|
219 | 219 | _setrebasesetvisibility(repo, state.keys()) |
|
220 | 220 | |
|
221 | 221 | self.originalwd = originalwd |
|
222 | 222 | self.target = target |
|
223 | 223 | self.state = state |
|
224 | 224 | self.skipped = skipped |
|
225 | 225 | self.collapsef = collapse |
|
226 | 226 | self.keepf = keep |
|
227 | 227 | self.keepbranchesf = keepbranches |
|
228 | 228 | self.external = external |
|
229 | 229 | self.activebookmark = activebookmark |
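
To make the parsing loop above easier to review, here is an illustrative sketch of the line layout restorestatus() expects in .hg/rebasestate. This is not the actual storestatus() implementation (defined later in this file); the helper and its name are hypothetical, nodes are 40-character hex strings, and the state entries are shown in their simplest "oldnode:newnode" form.

def formatrebasestate(originalwd, target, external, collapse, keep,
                      keepbranches, activebookmark, state):
    lines = [originalwd,              # line 0: original working dir node
             target,                  # line 1: rebase destination node
             external,                # line 2: external parent node
             '%d' % collapse,         # line 3: --collapse flag
             '%d' % keep,             # line 4: --keep flag
             '%d' % keepbranches,     # line 5: --keepbranches flag
             activebookmark or '']    # line 6: active bookmark, if any
    # remaining lines: one "oldnode:newnode" pair per revision in the rebase set
    lines.extend('%s:%s' % (old, new) for old, new in sorted(state.items()))
    return '\n'.join(lines) + '\n'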
|
230 | 230 | |
|
231 | 231 | def _handleskippingobsolete(self, rebaserevs, obsoleterevs, target): |
|
232 | 232 | """Compute structures necessary for skipping obsolete revisions |
|
233 | 233 | |
|
234 | 234 | rebaserevs: iterable of all revisions that are to be rebased |
|
235 | 235 | obsoleterevs: iterable of all obsolete revisions in rebaseset |
|
236 | 236 | target: a destination revision for the rebase operation |
|
237 | 237 | """ |
|
238 | 238 | self.obsoletenotrebased = {} |
|
239 | 239 | if not self.ui.configbool('experimental', 'rebaseskipobsolete', |
|
240 | 240 | default=True): |
|
241 | 241 | return |
|
242 | 242 | rebaseset = set(rebaserevs) |
|
243 | 243 | obsoleteset = set(obsoleterevs) |
|
244 | 244 | self.obsoletenotrebased = _computeobsoletenotrebased(self.repo, |
|
245 | 245 | obsoleteset, target) |
|
246 | 246 | skippedset = set(self.obsoletenotrebased) |
|
247 | 247 | _checkobsrebase(self.repo, self.ui, obsoleteset, rebaseset, skippedset) |
|
248 | 248 | |
|
249 | 249 | def _prepareabortorcontinue(self, isabort): |
|
250 | 250 | try: |
|
251 | 251 | self.restorestatus() |
|
252 | 252 | self.collapsemsg = restorecollapsemsg(self.repo) |
|
253 | 253 | except error.RepoLookupError: |
|
254 | 254 | if isabort: |
|
255 | 255 | clearstatus(self.repo) |
|
256 | 256 | clearcollapsemsg(self.repo) |
|
257 | 257 | self.repo.ui.warn(_('rebase aborted (no revision is removed,' |
|
258 | 258 | ' only broken state is cleared)\n')) |
|
259 | 259 | return 0 |
|
260 | 260 | else: |
|
261 | 261 | msg = _('cannot continue inconsistent rebase') |
|
262 | 262 | hint = _('use "hg rebase --abort" to clear broken state') |
|
263 | 263 | raise error.Abort(msg, hint=hint) |
|
264 | 264 | if isabort: |
|
265 | 265 | return abort(self.repo, self.originalwd, self.target, |
|
266 | 266 | self.state, activebookmark=self.activebookmark) |
|
267 | 267 | |
|
268 | 268 | obsrevs = (r for r, st in self.state.items() if st == revprecursor) |
|
269 | 269 | self._handleskippingobsolete(self.state.keys(), obsrevs, self.target) |
|
270 | 270 | |
|
271 | 271 | def _preparenewrebase(self, dest, rebaseset): |
|
272 | 272 | if dest is None: |
|
273 | 273 | return _nothingtorebase() |
|
274 | 274 | |
|
275 | 275 | allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt) |
|
276 | 276 | if (not (self.keepf or allowunstable) |
|
277 | 277 | and self.repo.revs('first(children(%ld) - %ld)', |
|
278 | 278 | rebaseset, rebaseset)): |
|
279 | 279 | raise error.Abort( |
|
280 | 280 | _("can't remove original changesets with" |
|
281 | 281 | " unrebased descendants"), |
|
282 | 282 | hint=_('use --keep to keep original changesets')) |
|
283 | 283 | |
|
284 | 284 | obsrevs = _filterobsoleterevs(self.repo, set(rebaseset)) |
|
285 | 285 | self._handleskippingobsolete(rebaseset, obsrevs, dest) |
|
286 | 286 | |
|
287 | 287 | result = buildstate(self.repo, dest, rebaseset, self.collapsef, |
|
288 | 288 | self.obsoletenotrebased) |
|
289 | 289 | |
|
290 | 290 | if not result: |
|
291 | 291 | # Empty state built, nothing to rebase |
|
292 | 292 | self.ui.status(_('nothing to rebase\n')) |
|
293 | 293 | return _nothingtorebase() |
|
294 | 294 | |
|
295 | 295 | root = min(rebaseset) |
|
296 | 296 | if not self.keepf and not self.repo[root].mutable(): |
|
297 | 297 | raise error.Abort(_("can't rebase public changeset %s") |
|
298 | 298 | % self.repo[root], |
|
299 | 299 | hint=_("see 'hg help phases' for details")) |
|
300 | 300 | |
|
301 | 301 | (self.originalwd, self.target, self.state) = result |
|
302 | 302 | if self.collapsef: |
|
303 | 303 | self.targetancestors = self.repo.changelog.ancestors( |
|
304 | 304 | [self.target], |
|
305 | 305 | inclusive=True) |
|
306 | 306 | self.external = externalparent(self.repo, self.state, |
|
307 | 307 | self.targetancestors) |
|
308 | 308 | |
|
309 | 309 | if dest.closesbranch() and not self.keepbranchesf: |
|
310 | 310 | self.ui.status(_('reopening closed branch head %s\n') % dest) |
|
311 | 311 | |
|
312 | 312 | def _performrebase(self): |
|
313 | 313 | repo, ui, opts = self.repo, self.ui, self.opts |
|
314 | 314 | if self.keepbranchesf: |
|
315 | 315 | # insert _savebranch at the start of extrafns so if |
|
316 | 316 | # there's a user-provided extrafn it can clobber branch if |
|
317 | 317 | # desired |
|
318 | 318 | self.extrafns.insert(0, _savebranch) |
|
319 | 319 | if self.collapsef: |
|
320 | 320 | branches = set() |
|
321 | 321 | for rev in self.state: |
|
322 | 322 | branches.add(repo[rev].branch()) |
|
323 | 323 | if len(branches) > 1: |
|
324 | 324 | raise error.Abort(_('cannot collapse multiple named ' |
|
325 | 325 | 'branches')) |
|
326 | 326 | |
|
327 | 327 | # Rebase |
|
328 | 328 | if not self.targetancestors: |
|
329 | 329 | self.targetancestors = repo.changelog.ancestors([self.target], |
|
330 | 330 | inclusive=True) |
|
331 | 331 | |
|
332 | 332 | # Keep track of the current bookmarks in order to reset them later |
|
333 | 333 | self.currentbookmarks = repo._bookmarks.copy() |
|
334 | 334 | self.activebookmark = self.activebookmark or repo._activebookmark |
|
335 | 335 | if self.activebookmark: |
|
336 | 336 | bookmarks.deactivate(repo) |
|
337 | 337 | |
|
338 | 338 | sortedrevs = repo.revs('sort(%ld, -topo)', self.state) |
|
339 | 339 | cands = [k for k, v in self.state.iteritems() if v == revtodo] |
|
340 | 340 | total = len(cands) |
|
341 | 341 | pos = 0 |
|
342 | 342 | for rev in sortedrevs: |
|
343 | 343 | ctx = repo[rev] |
|
344 | 344 | desc = '%d:%s "%s"' % (ctx.rev(), ctx, |
|
345 | 345 | ctx.description().split('\n', 1)[0]) |
|
346 | 346 | names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node()) |
|
347 | 347 | if names: |
|
348 | 348 | desc += ' (%s)' % ' '.join(names) |
|
349 | 349 | if self.state[rev] == revtodo: |
|
350 | 350 | pos += 1 |
|
351 | 351 | ui.status(_('rebasing %s\n') % desc) |
|
352 | 352 | ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)), |
|
353 | 353 | _('changesets'), total) |
|
354 | 354 | p1, p2, base = defineparents(repo, rev, self.target, |
|
355 | 355 | self.state, |
|
356 | 356 | self.targetancestors, |
|
357 | 357 | self.obsoletenotrebased) |
|
358 | 358 | storestatus(repo, self.originalwd, self.target, |
|
359 | 359 | self.state, self.collapsef, self.keepf, |
|
360 | 360 | self.keepbranchesf, self.external, |
|
361 | 361 | self.activebookmark) |
|
362 | 362 | storecollapsemsg(repo, self.collapsemsg) |
|
363 | 363 | if len(repo[None].parents()) == 2: |
|
364 | 364 | repo.ui.debug('resuming interrupted rebase\n') |
|
365 | 365 | else: |
|
366 | 366 | try: |
|
367 | 367 | ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), |
|
368 | 368 | 'rebase') |
|
369 | 369 | stats = rebasenode(repo, rev, p1, base, self.state, |
|
370 | 370 | self.collapsef, self.target) |
|
371 | 371 | if stats and stats[3] > 0: |
|
372 | 372 | raise error.InterventionRequired( |
|
373 | 373 | _('unresolved conflicts (see hg ' |
|
374 | 374 | 'resolve, then hg rebase --continue)')) |
|
375 | 375 | finally: |
|
376 | 376 | ui.setconfig('ui', 'forcemerge', '', 'rebase') |
|
377 | 377 | if not self.collapsef: |
|
378 | 378 | merging = p2 != nullrev |
|
379 | 379 | editform = cmdutil.mergeeditform(merging, 'rebase') |
|
380 | 380 | editor = cmdutil.getcommiteditor(editform=editform, **opts) |
|
381 | 381 | newnode = concludenode(repo, rev, p1, p2, |
|
382 | 382 | extrafn=_makeextrafn(self.extrafns), |
|
383 | 383 | editor=editor, |
|
384 | 384 | keepbranches=self.keepbranchesf, |
|
385 | 385 | date=self.date) |
|
386 | 386 | else: |
|
387 | 387 | # Skip commit if we are collapsing |
|
388 | 388 | repo.dirstate.beginparentchange() |
|
389 | 389 | repo.setparents(repo[p1].node()) |
|
390 | 390 | repo.dirstate.endparentchange() |
|
391 | 391 | newnode = None |
|
392 | 392 | # Update the state |
|
393 | 393 | if newnode is not None: |
|
394 | 394 | self.state[rev] = repo[newnode].rev() |
|
395 | 395 | ui.debug('rebased as %s\n' % short(newnode)) |
|
396 | 396 | else: |
|
397 | 397 | if not self.collapsef: |
|
398 | 398 | ui.warn(_('note: rebase of %d:%s created no changes ' |
|
399 | 399 | 'to commit\n') % (rev, ctx)) |
|
400 | 400 | self.skipped.add(rev) |
|
401 | 401 | self.state[rev] = p1 |
|
402 | 402 | ui.debug('next revision set to %s\n' % p1) |
|
403 | 403 | elif self.state[rev] == nullmerge: |
|
404 | 404 | ui.debug('ignoring null merge rebase of %s\n' % rev) |
|
405 | 405 | elif self.state[rev] == revignored: |
|
406 | 406 | ui.status(_('not rebasing ignored %s\n') % desc) |
|
407 | 407 | elif self.state[rev] == revprecursor: |
|
408 | 408 | targetctx = repo[self.obsoletenotrebased[rev]] |
|
409 | 409 | desctarget = '%d:%s "%s"' % (targetctx.rev(), targetctx, |
|
410 | 410 | targetctx.description().split('\n', 1)[0]) |
|
411 | 411 | msg = _('note: not rebasing %s, already in destination as %s\n') |
|
412 | 412 | ui.status(msg % (desc, desctarget)) |
|
413 | 413 | elif self.state[rev] == revpruned: |
|
414 | 414 | msg = _('note: not rebasing %s, it has no successor\n') |
|
415 | 415 | ui.status(msg % desc) |
|
416 | 416 | else: |
|
417 | 417 | ui.status(_('already rebased %s as %s\n') % |
|
418 | 418 | (desc, repo[self.state[rev]])) |
|
419 | 419 | |
|
420 | 420 | ui.progress(_('rebasing'), None) |
|
421 | 421 | ui.note(_('rebase merging completed\n')) |
|
422 | 422 | |
|
423 | 423 | def _finishrebase(self): |
|
424 | 424 | repo, ui, opts = self.repo, self.ui, self.opts |
|
425 | 425 | if self.collapsef and not self.keepopen: |
|
426 | 426 | p1, p2, _base = defineparents(repo, min(self.state), |
|
427 | 427 | self.target, self.state, |
|
428 | 428 | self.targetancestors, |
|
429 | 429 | self.obsoletenotrebased) |
|
430 | 430 | editopt = opts.get('edit') |
|
431 | 431 | editform = 'rebase.collapse' |
|
432 | 432 | if self.collapsemsg: |
|
433 | 433 | commitmsg = self.collapsemsg |
|
434 | 434 | else: |
|
435 | 435 | commitmsg = 'Collapsed revision' |
|
436 | 436 | for rebased in self.state: |
|
437 | 437 | if rebased not in self.skipped and\ |
|
438 | 438 | self.state[rebased] > nullmerge: |
|
439 | 439 | commitmsg += '\n* %s' % repo[rebased].description() |
|
440 | 440 | editopt = True |
|
441 | 441 | editor = cmdutil.getcommiteditor(edit=editopt, editform=editform) |
|
442 | 442 | revtoreuse = max(self.state) |
|
443 | 443 | newnode = concludenode(repo, revtoreuse, p1, self.external, |
|
444 | 444 | commitmsg=commitmsg, |
|
445 | 445 | extrafn=_makeextrafn(self.extrafns), |
|
446 | 446 | editor=editor, |
|
447 | 447 | keepbranches=self.keepbranchesf, |
|
448 | 448 | date=self.date) |
|
449 | 449 | if newnode is None: |
|
450 | 450 | newrev = self.target |
|
451 | 451 | else: |
|
452 | 452 | newrev = repo[newnode].rev() |
|
453 | 453 | for oldrev in self.state.iterkeys(): |
|
454 | 454 | if self.state[oldrev] > nullmerge: |
|
455 | 455 | self.state[oldrev] = newrev |
|
456 | 456 | |
|
457 | 457 | if 'qtip' in repo.tags(): |
|
458 | 458 | updatemq(repo, self.state, self.skipped, **opts) |
|
459 | 459 | |
|
460 | 460 | if self.currentbookmarks: |
|
461 | 461 | # Nodeids are needed to reset bookmarks |
|
462 | 462 | nstate = {} |
|
463 | 463 | for k, v in self.state.iteritems(): |
|
464 | 464 | if v > nullmerge: |
|
465 | 465 | nstate[repo[k].node()] = repo[v].node() |
|
466 | 466 | elif v == revprecursor: |
|
467 | 467 | succ = self.obsoletenotrebased[k] |
|
468 | 468 | nstate[repo[k].node()] = repo[succ].node() |
|
469 | 469 | # XXX this is the same as dest.node() for the non-continue path -- |
|
470 | 470 | # this should probably be cleaned up |
|
471 | 471 | targetnode = repo[self.target].node() |
|
472 | 472 | |
|
473 | 473 | # restore original working directory |
|
474 | 474 | # (we do this before stripping) |
|
475 | 475 | newwd = self.state.get(self.originalwd, self.originalwd) |
|
476 | 476 | if newwd == revprecursor: |
|
477 | 477 | newwd = self.obsoletenotrebased[self.originalwd] |
|
478 | 478 | elif newwd < 0: |
|
479 | 479 | # original directory is a parent of rebase set root or ignored |
|
480 | 480 | newwd = self.originalwd |
|
481 | 481 | if newwd not in [c.rev() for c in repo[None].parents()]: |
|
482 | 482 | ui.note(_("update back to initial working directory parent\n")) |
|
483 | 483 | hg.updaterepo(repo, newwd, False) |
|
484 | 484 | |
|
485 | 485 | if not self.keepf: |
|
486 | 486 | collapsedas = None |
|
487 | 487 | if self.collapsef: |
|
488 | 488 | collapsedas = newnode |
|
489 | 489 | clearrebased(ui, repo, self.state, self.skipped, collapsedas) |
|
490 | 490 | |
|
491 | 491 | with repo.transaction('bookmark') as tr: |
|
492 | 492 | if self.currentbookmarks: |
|
493 | 493 | updatebookmarks(repo, targetnode, nstate, |
|
494 | 494 | self.currentbookmarks, tr) |
|
495 | 495 | if self.activebookmark not in repo._bookmarks: |
|
496 | 496 | # active bookmark was a divergent one and has been deleted
|
497 | 497 | self.activebookmark = None |
|
498 | 498 | clearstatus(repo) |
|
499 | 499 | clearcollapsemsg(repo) |
|
500 | 500 | |
|
501 | 501 | ui.note(_("rebase completed\n")) |
|
502 | 502 | util.unlinkpath(repo.sjoin('undo'), ignoremissing=True) |
|
503 | 503 | if self.skipped: |
|
504 | 504 | skippedlen = len(self.skipped) |
|
505 | 505 | ui.note(_("%d revisions have been skipped\n") % skippedlen) |
|
506 | 506 | |
|
507 | 507 | if (self.activebookmark and |
|
508 | 508 | repo['.'].node() == repo._bookmarks[self.activebookmark]): |
|
509 | 509 | bookmarks.activate(repo, self.activebookmark) |
|
510 | 510 | |
|
511 | 511 | @command('rebase', |
|
512 | 512 | [('s', 'source', '', |
|
513 | 513 | _('rebase the specified changeset and descendants'), _('REV')), |
|
514 | 514 | ('b', 'base', '', |
|
515 | 515 | _('rebase everything from branching point of specified changeset'), |
|
516 | 516 | _('REV')), |
|
517 | 517 | ('r', 'rev', [], |
|
518 | 518 | _('rebase these revisions'), |
|
519 | 519 | _('REV')), |
|
520 | 520 | ('d', 'dest', '', |
|
521 | 521 | _('rebase onto the specified changeset'), _('REV')), |
|
522 | 522 | ('', 'collapse', False, _('collapse the rebased changesets')), |
|
523 | 523 | ('m', 'message', '', |
|
524 | 524 | _('use text as collapse commit message'), _('TEXT')), |
|
525 | 525 | ('e', 'edit', False, _('invoke editor on commit messages')), |
|
526 | 526 | ('l', 'logfile', '', |
|
527 | 527 | _('read collapse commit message from file'), _('FILE')), |
|
528 | 528 | ('k', 'keep', False, _('keep original changesets')), |
|
529 | 529 | ('', 'keepbranches', False, _('keep original branch names')), |
|
530 | 530 | ('D', 'detach', False, _('(DEPRECATED)')), |
|
531 | 531 | ('i', 'interactive', False, _('(DEPRECATED)')), |
|
532 | 532 | ('t', 'tool', '', _('specify merge tool')), |
|
533 | 533 | ('c', 'continue', False, _('continue an interrupted rebase')), |
|
534 | 534 | ('a', 'abort', False, _('abort an interrupted rebase'))] + |
|
535 | 535 | templateopts, |
|
536 | 536 | _('[-s REV | -b REV] [-d REV] [OPTION]')) |
|
537 | 537 | def rebase(ui, repo, **opts): |
|
538 | 538 | """move changeset (and descendants) to a different branch |
|
539 | 539 | |
|
540 | 540 | Rebase uses repeated merging to graft changesets from one part of |
|
541 | 541 | history (the source) onto another (the destination). This can be |
|
542 | 542 | useful for linearizing *local* changes relative to a master |
|
543 | 543 | development tree. |
|
544 | 544 | |
|
545 | 545 | Published commits cannot be rebased (see :hg:`help phases`). |
|
546 | 546 | To copy commits, see :hg:`help graft`. |
|
547 | 547 | |
|
548 | 548 | If you don't specify a destination changeset (``-d/--dest``), rebase |
|
549 | 549 | will use the same logic as :hg:`merge` to pick a destination. If
|
550 | 550 | the current branch contains exactly one other head, the other head |
|
551 | 551 | is merged with by default. Otherwise, an explicit revision with |
|
552 | 552 | which to merge must be provided. (The destination changeset is not
|
553 | 553 | modified by rebasing, but new changesets are added as its |
|
554 | 554 | descendants.) |
|
555 | 555 | |
|
556 | 556 | Here are the ways to select changesets: |
|
557 | 557 | |
|
558 | 558 | 1. Explicitly select them using ``--rev``. |
|
559 | 559 | |
|
560 | 560 | 2. Use ``--source`` to select a root changeset and include all of its |
|
561 | 561 | descendants. |
|
562 | 562 | |
|
563 | 563 | 3. Use ``--base`` to select a changeset; rebase will find ancestors |
|
564 | 564 | and their descendants which are not also ancestors of the destination. |
|
565 | 565 | |
|
566 | 566 | 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
|
567 | 567 | rebase will use ``--base .`` as above. |
|
568 | 568 | |
|
569 | 569 | Rebase will destroy original changesets unless you use ``--keep``. |
|
570 | 570 | It will also move your bookmarks (even if you do). |
|
571 | 571 | |
|
572 | 572 | Some changesets may be dropped if they do not contribute changes |
|
573 | 573 | (e.g. merges from the destination branch). |
|
574 | 574 | |
|
575 | 575 | Unlike ``merge``, rebase will do nothing if you are at the branch tip of |
|
576 | 576 | a named branch with two heads. You will need to explicitly specify source |
|
577 | 577 | and/or destination. |
|
578 | 578 | |
|
579 | 579 | If you need to use a tool to automate merge/conflict decisions, you |
|
580 | 580 | can specify one with ``--tool``, see :hg:`help merge-tools`. |
|
581 | 581 | As a caveat: the tool will not be used to mediate when a file was |
|
582 | 582 | deleted; there is no hook presently available for this.
|
583 | 583 | |
|
584 | 584 | If a rebase is interrupted to manually resolve a conflict, it can be |
|
585 | 585 | continued with --continue/-c or aborted with --abort/-a. |
|
586 | 586 | |
|
587 | 587 | .. container:: verbose |
|
588 | 588 | |
|
589 | 589 | Examples: |
|
590 | 590 | |
|
591 | 591 | - move "local changes" (current commit back to branching point) |
|
592 | 592 | to the current branch tip after a pull:: |
|
593 | 593 | |
|
594 | 594 | hg rebase |
|
595 | 595 | |
|
596 | 596 | - move a single changeset to the stable branch:: |
|
597 | 597 | |
|
598 | 598 | hg rebase -r 5f493448 -d stable |
|
599 | 599 | |
|
600 | 600 | - splice a commit and all its descendants onto another part of history:: |
|
601 | 601 | |
|
602 | 602 | hg rebase --source c0c3 --dest 4cf9 |
|
603 | 603 | |
|
604 | 604 | - rebase everything on a branch marked by a bookmark onto the |
|
605 | 605 | default branch:: |
|
606 | 606 | |
|
607 | 607 | hg rebase --base myfeature --dest default |
|
608 | 608 | |
|
609 | 609 | - collapse a sequence of changes into a single commit:: |
|
610 | 610 | |
|
611 | 611 | hg rebase --collapse -r 1520:1525 -d . |
|
612 | 612 | |
|
613 | 613 | - move a named branch while preserving its name:: |
|
614 | 614 | |
|
615 | 615 | hg rebase -r "branch(featureX)" -d 1.3 --keepbranches |
|
616 | 616 | |
|
617 | 617 | Returns 0 on success, 1 if nothing to rebase or there are |
|
618 | 618 | unresolved conflicts. |
|
619 | 619 | |
|
620 | 620 | """ |
|
621 | 621 | rbsrt = rebaseruntime(repo, ui, opts) |
|
622 | 622 | |
|
623 | 623 | lock = wlock = None |
|
624 | 624 | try: |
|
625 | 625 | wlock = repo.wlock() |
|
626 | 626 | lock = repo.lock() |
|
627 | 627 | |
|
628 | 628 | # Validate input and define rebasing points |
|
629 | 629 | destf = opts.get('dest', None) |
|
630 | 630 | srcf = opts.get('source', None) |
|
631 | 631 | basef = opts.get('base', None) |
|
632 | 632 | revf = opts.get('rev', []) |
|
633 | 633 | # search default destination in this space |
|
634 | 634 | # used in the 'hg pull --rebase' case, see issue 5214. |
|
635 | 635 | destspace = opts.get('_destspace') |
|
636 | 636 | contf = opts.get('continue') |
|
637 | 637 | abortf = opts.get('abort') |
|
638 | 638 | if opts.get('interactive'): |
|
639 | 639 | try: |
|
640 | 640 | if extensions.find('histedit'): |
|
641 | 641 | enablehistedit = '' |
|
642 | 642 | except KeyError: |
|
643 | 643 | enablehistedit = " --config extensions.histedit=" |
|
644 | 644 | help = "hg%s help -e histedit" % enablehistedit |
|
645 | 645 | msg = _("interactive history editing is supported by the " |
|
646 | 646 | "'histedit' extension (see \"%s\")") % help |
|
647 | 647 | raise error.Abort(msg) |
|
648 | 648 | |
|
649 | 649 | if rbsrt.collapsemsg and not rbsrt.collapsef: |
|
650 | 650 | raise error.Abort( |
|
651 | 651 | _('message can only be specified with collapse')) |
|
652 | 652 | |
|
653 | 653 | if contf or abortf: |
|
654 | 654 | if contf and abortf: |
|
655 | 655 | raise error.Abort(_('cannot use both abort and continue')) |
|
656 | 656 | if rbsrt.collapsef: |
|
657 | 657 | raise error.Abort( |
|
658 | 658 | _('cannot use collapse with continue or abort')) |
|
659 | 659 | if srcf or basef or destf: |
|
660 | 660 | raise error.Abort( |
|
661 | 661 | _('abort and continue do not allow specifying revisions')) |
|
662 | 662 | if abortf and opts.get('tool', False): |
|
663 | 663 | ui.warn(_('tool option will be ignored\n')) |
|
664 | 664 | if contf: |
|
665 | 665 | ms = mergemod.mergestate.read(repo) |
|
666 | 666 | cmdutil.checkunresolved(ms) |
|
667 | 667 | |
|
668 | 668 | retcode = rbsrt._prepareabortorcontinue(abortf) |
|
669 | 669 | if retcode is not None: |
|
670 | 670 | return retcode |
|
671 | 671 | else: |
|
672 | 672 | dest, rebaseset = _definesets(ui, repo, destf, srcf, basef, revf, |
|
673 | 673 | destspace=destspace) |
|
674 | 674 | retcode = rbsrt._preparenewrebase(dest, rebaseset) |
|
675 | 675 | if retcode is not None: |
|
676 | 676 | return retcode |
|
677 | 677 | |
|
678 | 678 | rbsrt._performrebase() |
|
679 | 679 | rbsrt._finishrebase() |
|
680 | 680 | finally: |
|
681 | 681 | release(lock, wlock) |
|
682 | 682 | |
|
683 | 683 | def _definesets(ui, repo, destf=None, srcf=None, basef=None, revf=[], |
|
684 | 684 | destspace=None): |
|
685 | 685 | """use revisions argument to define destination and rebase set |
|
686 | 686 | """ |
|
687 | 687 | # destspace is here to work around issues with `hg pull --rebase` see |
|
688 | 688 | # issue5214 for details |
|
689 | 689 | if srcf and basef: |
|
690 | 690 | raise error.Abort(_('cannot specify both a source and a base')) |
|
691 | 691 | if revf and basef: |
|
692 | 692 | raise error.Abort(_('cannot specify both a revision and a base')) |
|
693 | 693 | if revf and srcf: |
|
694 | 694 | raise error.Abort(_('cannot specify both a revision and a source')) |
|
695 | 695 | |
|
696 | 696 | cmdutil.checkunfinished(repo) |
|
697 | 697 | cmdutil.bailifchanged(repo) |
|
698 | 698 | |
|
699 | 699 | if destf: |
|
700 | 700 | dest = scmutil.revsingle(repo, destf) |
|
701 | 701 | |
|
702 | 702 | if revf: |
|
703 | 703 | rebaseset = scmutil.revrange(repo, revf) |
|
704 | 704 | if not rebaseset: |
|
705 | 705 | ui.status(_('empty "rev" revision set - nothing to rebase\n')) |
|
706 | 706 | return None, None |
|
707 | 707 | elif srcf: |
|
708 | 708 | src = scmutil.revrange(repo, [srcf]) |
|
709 | 709 | if not src: |
|
710 | 710 | ui.status(_('empty "source" revision set - nothing to rebase\n')) |
|
711 | 711 | return None, None |
|
712 | 712 | rebaseset = repo.revs('(%ld)::', src) |
|
713 | 713 | assert rebaseset |
|
714 | 714 | else: |
|
715 | 715 | base = scmutil.revrange(repo, [basef or '.']) |
|
716 | 716 | if not base: |
|
717 | 717 | ui.status(_('empty "base" revision set - ' |
|
718 | 718 | "can't compute rebase set\n")) |
|
719 | 719 | return None, None |
|
720 | 720 | if not destf: |
|
721 | 721 | dest = repo[_destrebase(repo, base, destspace=destspace)] |
|
722 | 722 | destf = str(dest) |
|
723 | 723 | |
|
724 | 724 | commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first() |
|
725 | 725 | if commonanc is not None: |
|
726 | 726 | rebaseset = repo.revs('(%d::(%ld) - %d)::', |
|
727 | 727 | commonanc, base, commonanc) |
|
728 | 728 | else: |
|
729 | 729 | rebaseset = [] |
|
730 | 730 | |
|
731 | 731 | if not rebaseset: |
|
732 | 732 | # transform to list because smartsets are not comparable to |
|
733 | 733 | # lists. This should be improved to honor laziness of |
|
734 | 734 | # smartset. |
|
735 | 735 | if list(base) == [dest.rev()]: |
|
736 | 736 | if basef: |
|
737 | 737 | ui.status(_('nothing to rebase - %s is both "base"' |
|
738 | 738 | ' and destination\n') % dest) |
|
739 | 739 | else: |
|
740 | 740 | ui.status(_('nothing to rebase - working directory ' |
|
741 | 741 | 'parent is also destination\n')) |
|
742 | 742 | elif not repo.revs('%ld - ::%d', base, dest): |
|
743 | 743 | if basef: |
|
744 | 744 | ui.status(_('nothing to rebase - "base" %s is ' |
|
745 | 745 | 'already an ancestor of destination ' |
|
746 | 746 | '%s\n') % |
|
747 | 747 | ('+'.join(str(repo[r]) for r in base), |
|
748 | 748 | dest)) |
|
749 | 749 | else: |
|
750 | 750 | ui.status(_('nothing to rebase - working ' |
|
751 | 751 | 'directory parent is already an ' |
|
752 | 752 | 'ancestor of destination %s\n') % dest) |
|
753 | 753 | else: # can it happen? |
|
754 | 754 | ui.status(_('nothing to rebase from %s to %s\n') % |
|
755 | 755 | ('+'.join(str(repo[r]) for r in base), dest)) |
|
756 | 756 | return None, None |
|
757 | 757 | |
|
758 | 758 | if not destf: |
|
759 | 759 | dest = repo[_destrebase(repo, rebaseset, destspace=destspace)] |
|
760 | 760 | destf = str(dest) |
|
761 | 761 | |
|
762 | 762 | return dest, rebaseset |
|
763 | 763 | |
|
764 | 764 | def externalparent(repo, state, targetancestors): |
|
765 | 765 | """Return the revision that should be used as the second parent |
|
766 | 766 | when the revisions in state are collapsed on top of targetancestors.
|
767 | 767 | Abort if there is more than one parent. |
|
768 | 768 | """ |
|
769 | 769 | parents = set() |
|
770 | 770 | source = min(state) |
|
771 | 771 | for rev in state: |
|
772 | 772 | if rev == source: |
|
773 | 773 | continue |
|
774 | 774 | for p in repo[rev].parents(): |
|
775 | 775 | if (p.rev() not in state |
|
776 | 776 | and p.rev() not in targetancestors): |
|
777 | 777 | parents.add(p.rev()) |
|
778 | 778 | if not parents: |
|
779 | 779 | return nullrev |
|
780 | 780 | if len(parents) == 1: |
|
781 | 781 | return parents.pop() |
|
782 | 782 | raise error.Abort(_('unable to collapse on top of %s, there is more ' |
|
783 | 783 | 'than one external parent: %s') % |
|
784 | 784 | (max(targetancestors), |
|
785 | 785 | ', '.join(str(p) for p in sorted(parents)))) |
|
786 | 786 | |
|
787 | 787 | def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None, |
|
788 | 788 | keepbranches=False, date=None): |
|
789 | 789 | '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev |
|
790 | 790 | but also store useful information in extra. |
|
791 | 791 | Return node of committed revision.''' |
|
792 | 792 | dsguard = cmdutil.dirstateguard(repo, 'rebase') |
|
793 | 793 | try: |
|
794 | 794 | repo.setparents(repo[p1].node(), repo[p2].node()) |
|
795 | 795 | ctx = repo[rev] |
|
796 | 796 | if commitmsg is None: |
|
797 | 797 | commitmsg = ctx.description() |
|
798 | 798 | keepbranch = keepbranches and repo[p1].branch() != ctx.branch() |
|
799 | 799 | extra = {'rebase_source': ctx.hex()} |
|
800 | 800 | if extrafn: |
|
801 | 801 | extrafn(ctx, extra) |
|
802 | 802 | |
|
803 | 803 | backup = repo.ui.backupconfig('phases', 'new-commit') |
|
804 | 804 | try: |
|
805 | 805 | targetphase = max(ctx.phase(), phases.draft) |
|
806 | 806 | repo.ui.setconfig('phases', 'new-commit', targetphase, 'rebase') |
|
807 | 807 | if keepbranch: |
|
808 | 808 | repo.ui.setconfig('ui', 'allowemptycommit', True) |
|
809 | 809 | # Commit might fail if unresolved files exist |
|
810 | 810 | if date is None: |
|
811 | 811 | date = ctx.date() |
|
812 | 812 | newnode = repo.commit(text=commitmsg, user=ctx.user(), |
|
813 | 813 | date=date, extra=extra, editor=editor) |
|
814 | 814 | finally: |
|
815 | 815 | repo.ui.restoreconfig(backup) |
|
816 | 816 | |
|
817 | 817 | repo.dirstate.setbranch(repo[newnode].branch()) |
|
818 | 818 | dsguard.close() |
|
819 | 819 | return newnode |
|
820 | 820 | finally: |
|
821 | 821 | release(dsguard) |
|
822 | 822 | |
|
823 | 823 | def rebasenode(repo, rev, p1, base, state, collapse, target): |
|
824 | 824 | 'Rebase a single revision rev on top of p1 using base as merge ancestor' |
|
825 | 825 | # Merge phase |
|
826 | 826 | # Update to target and merge it with local |
|
827 | 827 | if repo['.'].rev() != p1: |
|
828 | 828 | repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1])) |
|
829 | 829 | mergemod.update(repo, p1, False, True) |
|
830 | 830 | else: |
|
831 | 831 | repo.ui.debug(" already in target\n") |
|
832 | 832 | repo.dirstate.write(repo.currenttransaction()) |
|
833 | 833 | repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev])) |
|
834 | 834 | if base is not None: |
|
835 | 835 | repo.ui.debug(" detach base %d:%s\n" % (base, repo[base])) |
|
836 | 836 | # When collapsing in-place, the parent is the common ancestor, so we
|
837 | 837 | # have to allow merging with it. |
|
838 | 838 | stats = mergemod.update(repo, rev, True, True, base, collapse, |
|
839 | 839 | labels=['dest', 'source']) |
|
840 | 840 | if collapse: |
|
841 | 841 | copies.duplicatecopies(repo, rev, target) |
|
842 | 842 | else: |
|
843 | 843 | # If we're not using --collapse, we need to |
|
844 | 844 | # duplicate copies between the revision we're |
|
845 | 845 | # rebasing and its first parent, but *not* |
|
846 | 846 | # duplicate any copies that have already been |
|
847 | 847 | # performed in the destination. |
|
848 | 848 | p1rev = repo[rev].p1().rev() |
|
849 | 849 | copies.duplicatecopies(repo, rev, p1rev, skiprev=target) |
|
850 | 850 | return stats |
|
851 | 851 | |
|
852 | 852 | def nearestrebased(repo, rev, state): |
|
853 | 853 | """return the nearest ancestors of rev in the rebase result""" |
|
854 | 854 | rebased = [r for r in state if state[r] > nullmerge] |
|
855 | 855 | candidates = repo.revs('max(%ld and (::%d))', rebased, rev) |
|
856 | 856 | if candidates: |
|
857 | 857 | return state[candidates.first()] |
|
858 | 858 | else: |
|
859 | 859 | return None |
|
860 | 860 | |
|
861 | 861 | def _checkobsrebase(repo, ui, |
|
862 | 862 | rebaseobsrevs, |
|
863 | 863 | rebasesetrevs, |
|
864 | 864 | rebaseobsskipped): |
|
865 | 865 | """ |
|
866 | 866 | Abort if rebase will create divergence or rebase is noop because of markers |
|
867 | 867 | |
|
868 | 868 | `rebaseobsrevs`: set of obsolete revision in source |
|
869 | 869 | `rebasesetrevs`: set of revisions to be rebased from source |
|
870 | 870 | `rebaseobsskipped`: set of revisions from source skipped because they have |
|
871 | 871 | successors in destination |
|
872 | 872 | """ |
|
873 | 873 | # Obsolete node with successors not in dest leads to divergence |
|
874 | 874 | divergenceok = ui.configbool('experimental', |
|
875 | 875 | 'allowdivergence') |
|
876 | 876 | divergencebasecandidates = rebaseobsrevs - rebaseobsskipped |
|
877 | 877 | |
|
878 | 878 | if divergencebasecandidates and not divergenceok: |
|
879 | 879 | divhashes = (str(repo[r]) |
|
880 | 880 | for r in divergencebasecandidates) |
|
881 | 881 | msg = _("this rebase will cause " |
|
882 | 882 | "divergences from: %s") |
|
883 | 883 | h = _("to force the rebase please set " |
|
884 | 884 | "experimental.allowdivergence=True") |
|
885 | 885 | raise error.Abort(msg % (",".join(divhashes),), hint=h) |
|
886 | 886 | |
|
887 | 887 | def defineparents(repo, rev, target, state, targetancestors, |
|
888 | 888 | obsoletenotrebased): |
|
889 | 889 | 'Return the new parent relationship of the revision that will be rebased' |
|
890 | 890 | parents = repo[rev].parents() |
|
891 | 891 | p1 = p2 = nullrev |
|
892 | 892 | rp1 = None |
|
893 | 893 | |
|
894 | 894 | p1n = parents[0].rev() |
|
895 | 895 | if p1n in targetancestors: |
|
896 | 896 | p1 = target |
|
897 | 897 | elif p1n in state: |
|
898 | 898 | if state[p1n] == nullmerge: |
|
899 | 899 | p1 = target |
|
900 | 900 | elif state[p1n] in revskipped: |
|
901 | 901 | p1 = nearestrebased(repo, p1n, state) |
|
902 | 902 | if p1 is None: |
|
903 | 903 | p1 = target |
|
904 | 904 | else: |
|
905 | 905 | p1 = state[p1n] |
|
906 | 906 | else: # p1n external |
|
907 | 907 | p1 = target |
|
908 | 908 | p2 = p1n |
|
909 | 909 | |
|
910 | 910 | if len(parents) == 2 and parents[1].rev() not in targetancestors: |
|
911 | 911 | p2n = parents[1].rev() |
|
912 | 912 | # interesting second parent |
|
913 | 913 | if p2n in state: |
|
914 | 914 | if p1 == target: # p1n in targetancestors or external |
|
915 | 915 | p1 = state[p2n] |
|
916 | 916 | if p1 == revprecursor: |
|
917 | 917 | rp1 = obsoletenotrebased[p2n] |
|
918 | 918 | elif state[p2n] in revskipped: |
|
919 | 919 | p2 = nearestrebased(repo, p2n, state) |
|
920 | 920 | if p2 is None: |
|
921 | 921 | # no ancestors rebased yet, detach |
|
922 | 922 | p2 = target |
|
923 | 923 | else: |
|
924 | 924 | p2 = state[p2n] |
|
925 | 925 | else: # p2n external |
|
926 | 926 | if p2 != nullrev: # p1n external too => rev is a merged revision |
|
927 | 927 | raise error.Abort(_('cannot use revision %d as base, result ' |
|
928 | 928 | 'would have 3 parents') % rev) |
|
929 | 929 | p2 = p2n |
|
930 | 930 | repo.ui.debug(" future parents are %d and %d\n" % |
|
931 | 931 | (repo[rp1 or p1].rev(), repo[p2].rev())) |
|
932 | 932 | |
|
933 | 933 | if not any(p.rev() in state for p in parents): |
|
934 | 934 | # Case (1) root changeset of a non-detaching rebase set. |
|
935 | 935 | # Let the merge mechanism find the base itself. |
|
936 | 936 | base = None |
|
937 | 937 | elif not repo[rev].p2(): |
|
938 | 938 | # Case (2) detaching the node with a single parent, use this parent |
|
939 | 939 | base = repo[rev].p1().rev() |
|
940 | 940 | else: |
|
941 | 941 | # Assuming there is a p1, this is the case where there also is a p2. |
|
942 | 942 | # We are thus rebasing a merge and need to pick the right merge base. |
|
943 | 943 | # |
|
944 | 944 | # Imagine we have: |
|
945 | 945 | # - M: current rebase revision in this step |
|
946 | 946 | # - A: one parent of M |
|
947 | 947 | # - B: other parent of M |
|
948 | 948 | # - D: destination of this merge step (p1 var) |
|
949 | 949 | # |
|
950 | 950 | # Consider the case where D is a descendant of A or B and the other is |
|
951 | 951 | # 'outside'. In this case, the right merge base is the D ancestor. |
|
952 | 952 | # |
|
953 | 953 | # An informal proof, assuming A is 'outside' and B is the D ancestor: |
|
954 | 954 | # |
|
955 | 955 | # If we pick B as the base, the merge involves: |
|
956 | 956 | # - changes from B to M (actual changeset payload) |
|
957 | 957 | # - changes from B to D (induced by rebase, as D is a rebased
|
958 | 958 | # version of B) |
|
959 | 959 | # Which exactly represents the rebase operation.
|
960 | 960 | # |
|
961 | 961 | # If we pick A as the base, the merge involves: |
|
962 | 962 | # - changes from A to M (actual changeset payload) |
|
963 | 963 | # - changes from A to D (which include changes between unrelated A and B
|
964 | 964 | # plus changes induced by rebase) |
|
965 | 965 | # Which does not represent anything sensible and creates a lot of |
|
966 | 966 | # conflicts. A is thus not the right choice - B is. |
|
967 | 967 | # |
|
968 | 968 | # Note: The base found in this 'proof' is only correct in the specified |
|
969 | 969 | # case. This base does not make sense if D is not a descendant of A or B

970 | 970 | # or if the other parent is not 'outside' (especially not if the other
|
971 | 971 | # parent has been rebased). The current implementation does not |
|
972 | 972 | # make it feasible to consider different cases separately. In these |
|
973 | 973 | # other cases we currently just leave it to the user to correctly |
|
974 | 974 | # resolve an impossible merge using a wrong ancestor. |
|
975 | 975 | # |
|
976 | 976 | # xx, p1 could be -4, and both parents could probably be -4... |
|
977 | 977 | for p in repo[rev].parents(): |
|
978 | 978 | if state.get(p.rev()) == p1: |
|
979 | 979 | base = p.rev() |
|
980 | 980 | break |
|
981 | 981 | else: # fallback when base not found |
|
982 | 982 | base = None |
|
983 | 983 | |
|
984 | 984 | # Raise because this function is called wrong (see issue 4106) |
|
985 | 985 | raise AssertionError('no base found to rebase on ' |
|
986 | 986 | '(defineparents called wrong)') |
|
987 | 987 | return rp1 or p1, p2, base |
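
A toy illustration of the base choice argued in the comments above (the revision numbers are hypothetical, not taken from the surrounding code): M has parents A and B, B has already been rebased and its copy D is the new first parent, while A is outside the rebase set.

state = {3: 10}         # B (rev 3) was rebased; its copy D is rev 10
parents_of_m = [2, 3]   # A is rev 2, B is rev 3
p1 = 10                 # new first parent of the rebased M: D
# pick as merge base the parent whose rebased copy is the new parent (B),
# so the merge only sees B->M and B->D changes, as the proof above argues
base = next(p for p in parents_of_m if state.get(p) == p1)
assert base == 3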
|
988 | 988 | |
|
989 | 989 | def isagitpatch(repo, patchname): |
|
990 | 990 | 'Return true if the given patch is in git format' |
|
991 | 991 | mqpatch = os.path.join(repo.mq.path, patchname) |
|
992 | 992 | for line in patch.linereader(file(mqpatch, 'rb')): |
|
993 | 993 | if line.startswith('diff --git'): |
|
994 | 994 | return True |
|
995 | 995 | return False |
|
996 | 996 | |
|
997 | 997 | def updatemq(repo, state, skipped, **opts): |
|
998 | 998 | 'Update rebased mq patches - finalize and then import them' |
|
999 | 999 | mqrebase = {} |
|
1000 | 1000 | mq = repo.mq |
|
1001 | 1001 | original_series = mq.fullseries[:] |
|
1002 | 1002 | skippedpatches = set() |
|
1003 | 1003 | |
|
1004 | 1004 | for p in mq.applied: |
|
1005 | 1005 | rev = repo[p.node].rev() |
|
1006 | 1006 | if rev in state: |
|
1007 | 1007 | repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' % |
|
1008 | 1008 | (rev, p.name)) |
|
1009 | 1009 | mqrebase[rev] = (p.name, isagitpatch(repo, p.name)) |
|
1010 | 1010 | else: |
|
1011 | 1011 | # Applied but not rebased, not sure this should happen |
|
1012 | 1012 | skippedpatches.add(p.name) |
|
1013 | 1013 | |
|
1014 | 1014 | if mqrebase: |
|
1015 | 1015 | mq.finish(repo, mqrebase.keys()) |
|
1016 | 1016 | |
|
1017 | 1017 | # We must start import from the newest revision |
|
1018 | 1018 | for rev in sorted(mqrebase, reverse=True): |
|
1019 | 1019 | if rev not in skipped: |
|
1020 | 1020 | name, isgit = mqrebase[rev] |
|
1021 | 1021 | repo.ui.note(_('updating mq patch %s to %s:%s\n') % |
|
1022 | 1022 | (name, state[rev], repo[state[rev]])) |
|
1023 | 1023 | mq.qimport(repo, (), patchname=name, git=isgit, |
|
1024 | 1024 | rev=[str(state[rev])]) |
|
1025 | 1025 | else: |
|
1026 | 1026 | # Rebased and skipped |
|
1027 | 1027 | skippedpatches.add(mqrebase[rev][0]) |
|
1028 | 1028 | |
|
1029 | 1029 | # Patches were either applied and rebased and imported in |
|
1030 | 1030 | # order, applied and removed or unapplied. Discard the removed |
|
1031 | 1031 | # ones while preserving the original series order and guards. |
|
1032 | 1032 | newseries = [s for s in original_series |
|
1033 | 1033 | if mq.guard_re.split(s, 1)[0] not in skippedpatches] |
|
1034 | 1034 | mq.fullseries[:] = newseries |
|
1035 | 1035 | mq.seriesdirty = True |
|
1036 | 1036 | mq.savedirty() |
|
1037 | 1037 | |
|
1038 | 1038 | def updatebookmarks(repo, targetnode, nstate, originalbookmarks, tr): |
|
1039 | 1039 | 'Move bookmarks to their correct changesets, and delete divergent ones' |
|
1040 | 1040 | marks = repo._bookmarks |
|
1041 | 1041 | for k, v in originalbookmarks.iteritems(): |
|
1042 | 1042 | if v in nstate: |
|
1043 | 1043 | # update the bookmarks for revs that have moved |
|
1044 | 1044 | marks[k] = nstate[v] |
|
1045 | 1045 | bookmarks.deletedivergent(repo, [targetnode], k) |
|
1046 | 1046 | marks.recordchange(tr) |
|
1047 | 1047 | |
|
1048 | 1048 | def storecollapsemsg(repo, collapsemsg): |
|
1049 | 1049 | 'Store the collapse message to allow recovery' |
|
1050 | 1050 | collapsemsg = collapsemsg or '' |
|
1051 | 1051 | f = repo.vfs("last-message.txt", "w") |
|
1052 | 1052 | f.write("%s\n" % collapsemsg) |
|
1053 | 1053 | f.close() |
|
1054 | 1054 | |
|
1055 | 1055 | def clearcollapsemsg(repo): |
|
1056 | 1056 | 'Remove collapse message file' |
|
1057 | 1057 | util.unlinkpath(repo.join("last-message.txt"), ignoremissing=True) |
|
1058 | 1058 | |
|
1059 | 1059 | def restorecollapsemsg(repo): |
|
1060 | 1060 | 'Restore previously stored collapse message' |
|
1061 | 1061 | try: |
|
1062 | 1062 | f = repo.vfs("last-message.txt") |
|
1063 | 1063 | collapsemsg = f.readline().strip() |
|
1064 | 1064 | f.close() |
|
1065 | 1065 | except IOError as err: |
|
1066 | 1066 | if err.errno != errno.ENOENT: |
|
1067 | 1067 | raise |
|
1068 | 1068 | raise error.Abort(_('no rebase in progress')) |
|
1069 | 1069 | return collapsemsg |
|
1070 | 1070 | |
|
1071 | 1071 | def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches, |
|
1072 | 1072 | external, activebookmark): |
|
1073 | 1073 | 'Store the current status to allow recovery' |
|
1074 | 1074 | f = repo.vfs("rebasestate", "w") |
|
1075 | 1075 | f.write(repo[originalwd].hex() + '\n') |
|
1076 | 1076 | f.write(repo[target].hex() + '\n') |
|
1077 | 1077 | f.write(repo[external].hex() + '\n') |
|
1078 | 1078 | f.write('%d\n' % int(collapse)) |
|
1079 | 1079 | f.write('%d\n' % int(keep)) |
|
1080 | 1080 | f.write('%d\n' % int(keepbranches)) |
|
1081 | 1081 | f.write('%s\n' % (activebookmark or '')) |
|
1082 | 1082 | for d, v in state.iteritems(): |
|
1083 | 1083 | oldrev = repo[d].hex() |
|
1084 | 1084 | if v >= 0: |
|
1085 | 1085 | newrev = repo[v].hex() |
|
1086 | 1086 | elif v == revtodo: |
|
1087 | 1087 | # To maintain format compatibility, we have to use nullid. |
|
1088 | 1088 | # Please do remove this special case when upgrading the format. |
|
1089 | 1089 | newrev = hex(nullid) |
|
1090 | 1090 | else: |
|
1091 | 1091 | newrev = v |
|
1092 | 1092 | f.write("%s:%s\n" % (oldrev, newrev)) |
|
1093 | 1093 | f.close() |
|
1094 | 1094 | repo.ui.debug('rebase status stored\n') |
|
1095 | 1095 | |
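From the sequence of writes in `storestatus` above, the `rebasestate` file is a simple line-oriented text format. A sketch of its layout (placeholders rather than real hashes):

    line 1:  hex node of originalwd
    line 2:  hex node of target
    line 3:  hex node of external
    line 4:  collapse flag, '0' or '1'
    line 5:  keep flag, '0' or '1'
    line 6:  keepbranches flag, '0' or '1'
    line 7:  active bookmark name, possibly empty
    line 8+: one 'oldnode:newnode' hex pair per entry in state
             (newnode is nullid's hex while the entry is still revtodo)

The `restorestatus` used by `summaryhook` further below presumably parses these same lines back when a rebase is resumed or aborted.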
|
1096 | 1096 | def clearstatus(repo): |
|
1097 | 1097 | 'Remove the status files' |
|
1098 | 1098 | _clearrebasesetvisibiliy(repo) |
|
1099 | 1099 | util.unlinkpath(repo.join("rebasestate"), ignoremissing=True) |
|
1100 | 1100 | |
|
1101 | 1101 | def needupdate(repo, state): |
|
1102 | 1102 | '''check whether we should `update --clean` away from a merge, or if |
|
1103 | 1103 | somehow the working dir got forcibly updated, e.g. by older hg''' |
|
1104 | 1104 | parents = [p.rev() for p in repo[None].parents()] |
|
1105 | 1105 | |
|
1106 | 1106 | # Are we in a merge state at all? |
|
1107 | 1107 | if len(parents) < 2: |
|
1108 | 1108 | return False |
|
1109 | 1109 | |
|
1110 | 1110 | # We should be standing on the first as-of-yet unrebased commit. |
|
1111 | 1111 | firstunrebased = min([old for old, new in state.iteritems() |
|
1112 | 1112 | if new == nullrev]) |
|
1113 | 1113 | if firstunrebased in parents: |
|
1114 | 1114 | return True |
|
1115 | 1115 | |
|
1116 | 1116 | return False |
|
1117 | 1117 | |
|
1118 | 1118 | def abort(repo, originalwd, target, state, activebookmark=None): |
|
1119 | 1119 | '''Restore the repository to its original state. Additional args: |
|
1120 | 1120 | |
|
1121 | 1121 | activebookmark: the name of the bookmark that should be active after the |
|
1122 | 1122 | restore''' |
|
1123 | 1123 | |
|
1124 | 1124 | try: |
|
1125 | 1125 | # If the first commits in the rebased set get skipped during the rebase, |
|
1126 | 1126 | # their values within the state mapping will be the target rev id. The |
|
1127 | 1127 | # dstates list must not contain the target rev (issue4896)
|
1128 | 1128 | dstates = [s for s in state.values() if s >= 0 and s != target] |
|
1129 | 1129 | immutable = [d for d in dstates if not repo[d].mutable()] |
|
1130 | 1130 | cleanup = True |
|
1131 | 1131 | if immutable: |
|
1132 | 1132 | repo.ui.warn(_("warning: can't clean up public changesets %s\n") |
|
1133 | 1133 | % ', '.join(str(repo[r]) for r in immutable), |
|
1134 | 1134 | hint=_("see 'hg help phases' for details")) |
|
1135 | 1135 | cleanup = False |
|
1136 | 1136 | |
|
1137 | 1137 | descendants = set() |
|
1138 | 1138 | if dstates: |
|
1139 | 1139 | descendants = set(repo.changelog.descendants(dstates)) |
|
1140 | 1140 | if descendants - set(dstates): |
|
1141 | 1141 | repo.ui.warn(_("warning: new changesets detected on target branch, " |
|
1142 | 1142 | "can't strip\n")) |
|
1143 | 1143 | cleanup = False |
|
1144 | 1144 | |
|
1145 | 1145 | if cleanup: |
|
1146 | 1146 | shouldupdate = False |
|
1147 | 1147 | rebased = filter(lambda x: x >= 0 and x != target, state.values()) |
|
1148 | 1148 | if rebased: |
|
1149 | 1149 | strippoints = [ |
|
1150 | 1150 | c.node() for c in repo.set('roots(%ld)', rebased)] |
|
1151 | 1151 | shouldupdate = len([ |
|
1152 | 1152 | c.node() for c in repo.set('. & (%ld)', rebased)]) > 0 |
|
1153 | 1153 | |
|
1154 | 1154 | # Update away from the rebase if necessary |
|
1155 | 1155 | if shouldupdate or needupdate(repo, state): |
|
1156 | 1156 | mergemod.update(repo, originalwd, False, True) |
|
1157 | 1157 | |
|
1158 | 1158 | # Strip from the first rebased revision |
|
1159 | 1159 | if rebased: |
|
1160 | 1160 | # no backup of rebased cset versions needed |
|
1161 | 1161 | repair.strip(repo.ui, repo, strippoints) |
|
1162 | 1162 | |
|
1163 | 1163 | if activebookmark and activebookmark in repo._bookmarks: |
|
1164 | 1164 | bookmarks.activate(repo, activebookmark) |
|
1165 | 1165 | |
|
1166 | 1166 | finally: |
|
1167 | 1167 | clearstatus(repo) |
|
1168 | 1168 | clearcollapsemsg(repo) |
|
1169 | 1169 | repo.ui.warn(_('rebase aborted\n')) |
|
1170 | 1170 | return 0 |
|
1171 | 1171 | |
|
1172 | 1172 | def buildstate(repo, dest, rebaseset, collapse, obsoletenotrebased): |
|
1173 | 1173 | '''Define which revisions are going to be rebased and where |
|
1174 | 1174 | |
|
1175 | 1175 | repo: repo |
|
1176 | 1176 | dest: context |
|
1177 | 1177 | rebaseset: set of rev |
|
1178 | 1178 | ''' |
|
1179 | 1179 | _setrebasesetvisibility(repo, rebaseset) |
|
1180 | 1180 | |
|
1181 | 1181 | # This check isn't strictly necessary, since mq detects commits over an |
|
1182 | 1182 | # applied patch. But it prevents messing up the working directory when |
|
1183 | 1183 | # a partially completed rebase is blocked by mq. |
|
1184 | 1184 | if 'qtip' in repo.tags() and (dest.node() in |
|
1185 | 1185 | [s.node for s in repo.mq.applied]): |
|
1186 | 1186 | raise error.Abort(_('cannot rebase onto an applied mq patch')) |
|
1187 | 1187 | |
|
1188 | 1188 | roots = list(repo.set('roots(%ld)', rebaseset)) |
|
1189 | 1189 | if not roots: |
|
1190 | 1190 | raise error.Abort(_('no matching revisions')) |
|
1191 | 1191 | roots.sort() |
|
1192 | 1192 | state = {} |
|
1193 | 1193 | detachset = set() |
|
1194 | 1194 | for root in roots: |
|
1195 | 1195 | commonbase = root.ancestor(dest) |
|
1196 | 1196 | if commonbase == root: |
|
1197 | 1197 | raise error.Abort(_('source is ancestor of destination')) |
|
1198 | 1198 | if commonbase == dest: |
|
1199 | 1199 | samebranch = root.branch() == dest.branch() |
|
1200 | 1200 | if not collapse and samebranch and root in dest.children(): |
|
1201 | 1201 | repo.ui.debug('source is a child of destination\n') |
|
1202 | 1202 | return None |
|
1203 | 1203 | |
|
1204 | 1204 | repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root)) |
|
1205 | 1205 | state.update(dict.fromkeys(rebaseset, revtodo)) |
|
1206 | 1206 | # Rebase tries to turn <dest> into a parent of <root> while |
|
1207 | 1207 | # preserving the number of parents of rebased changesets: |
|
1208 | 1208 | # |
|
1209 | 1209 | # - A changeset with a single parent will always be rebased as a |
|
1210 | 1210 | # changeset with a single parent. |
|
1211 | 1211 | # |
|
1212 | 1212 | # - A merge will be rebased as a merge unless its parents are both
|
1213 | 1213 | # ancestors of <dest> or are themselves in the rebased set and |
|
1214 | 1214 | # pruned while rebased. |
|
1215 | 1215 | # |
|
1216 | 1216 | # If one parent of <root> is an ancestor of <dest>, the rebased |
|
1217 | 1217 | # version of this parent will be <dest>. This is always true with |
|
1218 | 1218 | # --base option. |
|
1219 | 1219 | # |
|
1220 | 1220 | # Otherwise, we need to *replace* the original parents with |
|
1221 | 1221 | # <dest>. This "detaches" the rebased set from its former location |
|
1222 | 1222 | # and rebases it onto <dest>. Changes introduced by ancestors of |
|
1223 | 1223 | # <root> not common with <dest> (the detachset, marked as |
|
1224 | 1224 | # nullmerge) are "removed" from the rebased changesets. |
|
1225 | 1225 | # |
|
1226 | 1226 | # - If <root> has a single parent, set it to <dest>. |
|
1227 | 1227 | # |
|
1228 | 1228 | # - If <root> is a merge, we cannot decide which parent to |
|
1229 | 1229 | # replace, the rebase operation is not clearly defined. |
|
1230 | 1230 | # |
|
1231 | 1231 | # The table below sums up this behavior: |
|
1232 | 1232 | # |
|
1233 | 1233 | # +------------------+----------------------+-------------------------+ |
|
1234 | 1234 | # | | one parent | merge | |
|
1235 | 1235 | # +------------------+----------------------+-------------------------+ |
|
1236 | 1236 | # | parent in | new parent is <dest> | parents in ::<dest> are | |
|
1237 | 1237 | # | ::<dest> | | remapped to <dest> | |
|
1238 | 1238 | # +------------------+----------------------+-------------------------+ |
|
1239 | 1239 | # | unrelated source | new parent is <dest> | ambiguous, abort | |
|
1240 | 1240 | # +------------------+----------------------+-------------------------+ |
|
1241 | 1241 | # |
|
1242 | 1242 | # The actual abort is handled by `defineparents` |
|
1243 | 1243 | if len(root.parents()) <= 1: |
|
1244 | 1244 | # ancestors of <root> not ancestors of <dest> |
|
1245 | 1245 | detachset.update(repo.changelog.findmissingrevs([commonbase.rev()], |
|
1246 | 1246 | [root.rev()])) |
|
1247 | 1247 | for r in detachset: |
|
1248 | 1248 | if r not in state: |
|
1249 | 1249 | state[r] = nullmerge |
|
1250 | 1250 | if len(roots) > 1: |
|
1251 | 1251 | # If we have multiple roots, we may have a "hole" in the rebase set.

1252 | 1252 | # Rebase roots that descend from such a "hole" should not be detached as

1253 | 1253 | # the other roots are. We use the special `revignored` to inform rebase that

1254 | 1254 | # the revision should be ignored but that `defineparents` should search for

1255 | 1255 | # a rebase destination that makes sense regarding the rebased topology.
|
1256 | 1256 | rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset)) |
|
1257 | 1257 | for ignored in set(rebasedomain) - set(rebaseset): |
|
1258 | 1258 | state[ignored] = revignored |
|
1259 | 1259 | for r in obsoletenotrebased: |
|
1260 | 1260 | if obsoletenotrebased[r] is None: |
|
1261 | 1261 | state[r] = revpruned |
|
1262 | 1262 | else: |
|
1263 | 1263 | state[r] = revprecursor |
|
1264 | 1264 | return repo['.'].rev(), dest.rev(), state |
|
1265 | 1265 | |
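To make the table and the sentinel values above concrete, here is a hypothetical shape of the `state` mapping `buildstate` returns, with made-up revision numbers (the sentinel names `revtodo`, `revignored`, `nullmerge`, `revprecursor` and `revpruned` are the ones used throughout this file):

    # e.g. rebasing revisions 12 and 14 onto some destination, where 13 sits
    # in the "hole" between the two roots and 11 is an ancestor of a root
    # that is not an ancestor of the destination
    state = {
        12: revtodo,       # will be rebased by a later step
        14: revtodo,
        13: revignored,    # inside 12::14 but not in the rebase set
        11: nullmerge,     # detached ancestor; its changes are dropped
        9:  revprecursor,  # obsolete, successor already in the destination
        8:  revpruned,     # obsolete with no successor (plain prune)
    }

The first element of the returned tuple is the current working-directory revision and the second is the destination revision.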
|
1266 | 1266 | def clearrebased(ui, repo, state, skipped, collapsedas=None): |
|
1267 | 1267 | """dispose of rebased revision at the end of the rebase |
|
1268 | 1268 | |
|
1269 | 1269 | If `collapsedas` is not None, the rebase was a collapse whose result is the
|
1270 | 1270 | `collapsedas` node.""" |
|
1271 | 1271 | if obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
1272 | 1272 | markers = [] |
|
1273 | 1273 | for rev, newrev in sorted(state.items()): |
|
1274 | 1274 | if newrev >= 0: |
|
1275 | 1275 | if rev in skipped: |
|
1276 | 1276 | succs = () |
|
1277 | 1277 | elif collapsedas is not None: |
|
1278 | 1278 | succs = (repo[collapsedas],) |
|
1279 | 1279 | else: |
|
1280 | 1280 | succs = (repo[newrev],) |
|
1281 | 1281 | markers.append((repo[rev], succs)) |
|
1282 | 1282 | if markers: |
|
1283 | 1283 | obsolete.createmarkers(repo, markers) |
|
1284 | 1284 | else: |
|
1285 | 1285 | rebased = [rev for rev in state if state[rev] > nullmerge] |
|
1286 | 1286 | if rebased: |
|
1287 | 1287 | stripped = [] |
|
1288 | 1288 | for root in repo.set('roots(%ld)', rebased): |
|
1289 | 1289 | if set(repo.changelog.descendants([root.rev()])) - set(state): |
|
1290 | 1290 | ui.warn(_("warning: new changesets detected " |
|
1291 | 1291 | "on source branch, not stripping\n")) |
|
1292 | 1292 | else: |
|
1293 | 1293 | stripped.append(root.node()) |
|
1294 | 1294 | if stripped: |
|
1295 | 1295 | # backup the old csets by default |
|
1296 | 1296 | repair.strip(ui, repo, stripped, "all") |
|
1297 | 1297 | |
|
1298 | 1298 | |
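A small illustration of what the obsolescence branch above builds, again with made-up revisions: each rebased changeset becomes a predecessor of its rebased copy, skipped changesets get an empty successor tuple (i.e. they are pruned), and with --collapse every rebased changeset points at the single collapsed node:

    # rev 10 rebased to 40, rev 11 skipped
    markers = [
        (repo[10], (repo[40],)),
        (repo[11], ()),
    ]
    obsolete.createmarkers(repo, markers)

When obsolescence markers are not enabled, the else branch falls back to stripping the rebased roots instead.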
|
1299 | 1299 | def pullrebase(orig, ui, repo, *args, **opts): |
|
1300 | 1300 | 'Call rebase after pull if the latter has been invoked with --rebase' |
|
1301 | 1301 | ret = None |
|
1302 | 1302 | if opts.get('rebase'): |
|
1303 | 1303 | wlock = lock = None |
|
1304 | 1304 | try: |
|
1305 | 1305 | wlock = repo.wlock() |
|
1306 | 1306 | lock = repo.lock() |
|
1307 | 1307 | if opts.get('update'): |
|
1308 | 1308 | del opts['update'] |
|
1309 | 1309 | ui.debug('--update and --rebase are not compatible, ignoring ' |
|
1310 | 1310 | 'the update flag\n') |
|
1311 | 1311 | |
|
1312 | 1312 | revsprepull = len(repo) |
|
1313 | 1313 | origpostincoming = commands.postincoming |
|
1314 | 1314 | def _dummy(*args, **kwargs): |
|
1315 | 1315 | pass |
|
1316 | 1316 | commands.postincoming = _dummy |
|
1317 | 1317 | try: |
|
1318 | 1318 | ret = orig(ui, repo, *args, **opts) |
|
1319 | 1319 | finally: |
|
1320 | 1320 | commands.postincoming = origpostincoming |
|
1321 | 1321 | revspostpull = len(repo) |
|
1322 | 1322 | if revspostpull > revsprepull: |
|
1323 | 1323 | # --rev option from pull conflict with rebase own --rev |
|
1324 | 1324 | # dropping it |
|
1325 | 1325 | if 'rev' in opts: |
|
1326 | 1326 | del opts['rev'] |
|
1327 | 1327 | # positional argument from pull conflicts with rebase's own |
|
1328 | 1328 | # --source. |
|
1329 | 1329 | if 'source' in opts: |
|
1330 | 1330 | del opts['source'] |
|
1331 | 1331 | # revsprepull is the len of the repo, not the revnum of tip.
|
1332 | 1332 | destspace = list(repo.changelog.revs(start=revsprepull)) |
|
1333 | 1333 | opts['_destspace'] = destspace |
|
1334 | 1334 | try: |
|
1335 | 1335 | rebase(ui, repo, **opts) |
|
1336 | 1336 | except error.NoMergeDestAbort: |
|
1337 | 1337 | # we can maybe update instead |
|
1338 | 1338 | rev, _a, _b = destutil.destupdate(repo) |
|
1339 | 1339 | if rev == repo['.'].rev(): |
|
1340 | 1340 | ui.status(_('nothing to rebase\n')) |
|
1341 | 1341 | else: |
|
1342 | 1342 | ui.status(_('nothing to rebase - updating instead\n')) |
|
1343 | 1343 | # not passing argument to get the bare update behavior |
|
1344 | 1344 | # with warning and trumpets |
|
1345 | 1345 | commands.update(ui, repo) |
|
1346 | 1346 | finally: |
|
1347 | 1347 | release(lock, wlock) |
|
1348 | 1348 | else: |
|
1349 | 1349 | if opts.get('tool'): |
|
1350 | 1350 | raise error.Abort(_('--tool can only be used with --rebase')) |
|
1351 | 1351 | ret = orig(ui, repo, *args, **opts) |
|
1352 | 1352 | |
|
1353 | 1353 | return ret |
|
1354 | 1354 | |
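In user terms, the wrapper above is what backs `hg pull --rebase` once the extension is enabled (the options themselves are registered in `uisetup` below); a couple of hedged usage examples:

    hg pull --rebase                  # pull, then rebase onto the new changesets
    hg pull --rebase --tool :merge    # also pick a merge tool for the rebase step

`--tool` without `--rebase` is rejected by the final branch of the wrapper, and `--update` is dropped in favour of the rebase (with a debug message).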
|
1355 | 1355 | def _setrebasesetvisibility(repo, revs): |
|
1356 | 1356 | """store the currently rebased set on the repo object |
|
1357 | 1357 | |
|
1358 | 1358 | This is used by another function to prevent rebased revisions from becoming
|
1359 | 1359 | hidden (see issue4505)""" |
|
1360 | 1360 | repo = repo.unfiltered() |
|
1361 | 1361 | revs = set(revs) |
|
1362 | 1362 | repo._rebaseset = revs |
|
1363 | 1363 | # invalidate cache if visibility changes |
|
1364 | 1364 | hiddens = repo.filteredrevcache.get('visible', set()) |
|
1365 | 1365 | if revs & hiddens: |
|
1366 | 1366 | repo.invalidatevolatilesets() |
|
1367 | 1367 | |
|
1368 | 1368 | def _clearrebasesetvisibiliy(repo): |
|
1369 | 1369 | """remove rebaseset data from the repo""" |
|
1370 | 1370 | repo = repo.unfiltered() |
|
1371 | 1371 | if '_rebaseset' in vars(repo): |
|
1372 | 1372 | del repo._rebaseset |
|
1373 | 1373 | |
|
1374 | 1374 | def _rebasedvisible(orig, repo): |
|
1375 | 1375 | """ensure rebased revs stay visible (see issue4505)""" |
|
1376 | 1376 | blockers = orig(repo) |
|
1377 | 1377 | blockers.update(getattr(repo, '_rebaseset', ())) |
|
1378 | 1378 | return blockers |
|
1379 | 1379 | |
|
1380 | 1380 | def _filterobsoleterevs(repo, revs): |
|
1381 | 1381 | """returns a set of the obsolete revisions in revs""" |
|
1382 | 1382 | return set(r for r in revs if repo[r].obsolete()) |
|
1383 | 1383 | |
|
1384 | 1384 | def _computeobsoletenotrebased(repo, rebaseobsrevs, dest): |
|
1385 | 1385 | """return a mapping obsolete => successor for all obsolete nodes to be |
|
1386 | 1386 | rebased that have a successor in the destination
|
1387 | 1387 | |
|
1388 | obsolete => None entries in the mapping indicate nodes with no succesor""" | |
|
1388 | obsolete => None entries in the mapping indicate nodes with no successor""" | |
|
1389 | 1389 | obsoletenotrebased = {} |
|
1390 | 1390 | |
|
1391 | 1391 | # Build a mapping successor => obsolete nodes for the obsolete |
|
1392 | 1392 | # nodes to be rebased |
|
1393 | 1393 | allsuccessors = {} |
|
1394 | 1394 | cl = repo.changelog |
|
1395 | 1395 | for r in rebaseobsrevs: |
|
1396 | 1396 | node = cl.node(r) |
|
1397 | 1397 | for s in obsolete.allsuccessors(repo.obsstore, [node]): |
|
1398 | 1398 | try: |
|
1399 | 1399 | allsuccessors[cl.rev(s)] = cl.rev(node) |
|
1400 | 1400 | except LookupError: |
|
1401 | 1401 | pass |
|
1402 | 1402 | |
|
1403 | 1403 | if allsuccessors: |
|
1404 | 1404 | # Look for successors of obsolete nodes to be rebased among |
|
1405 | 1405 | # the ancestors of dest |
|
1406 | 1406 | ancs = cl.ancestors([repo[dest].rev()], |
|
1407 | 1407 | stoprev=min(allsuccessors), |
|
1408 | 1408 | inclusive=True) |
|
1409 | 1409 | for s in allsuccessors: |
|
1410 | 1410 | if s in ancs: |
|
1411 | 1411 | obsoletenotrebased[allsuccessors[s]] = s |
|
1412 | 1412 | elif (s == allsuccessors[s] and |
|
1413 | 1413 | allsuccessors.values().count(s) == 1): |
|
1414 | 1414 | # plain prune |
|
1415 | 1415 | obsoletenotrebased[s] = None |
|
1416 | 1416 | |
|
1417 | 1417 | return obsoletenotrebased |
|
1418 | 1418 | |
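A toy value of the mapping returned above, with invented revision numbers: if obsolete revision 5 in the rebase set has a successor, revision 30, that is already an ancestor of the destination, and revision 6 was pruned (its only recorded successor entry is itself), the result would be:

    obsoletenotrebased = {5: 30, 6: None}

`buildstate` earlier in the file then marks 5 as `revprecursor` and 6 as `revpruned`.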
|
1419 | 1419 | def summaryhook(ui, repo): |
|
1420 | 1420 | if not os.path.exists(repo.join('rebasestate')): |
|
1421 | 1421 | return |
|
1422 | 1422 | try: |
|
1423 | 1423 | rbsrt = rebaseruntime(repo, ui, {}) |
|
1424 | 1424 | rbsrt.restorestatus() |
|
1425 | 1425 | state = rbsrt.state |
|
1426 | 1426 | except error.RepoLookupError: |
|
1427 | 1427 | # i18n: column positioning for "hg summary" |
|
1428 | 1428 | msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n') |
|
1429 | 1429 | ui.write(msg) |
|
1430 | 1430 | return |
|
1431 | 1431 | numrebased = len([i for i in state.itervalues() if i >= 0]) |
|
1432 | 1432 | # i18n: column positioning for "hg summary" |
|
1433 | 1433 | ui.write(_('rebase: %s, %s (rebase --continue)\n') % |
|
1434 | 1434 | (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased, |
|
1435 | 1435 | ui.label(_('%d remaining'), 'rebase.remaining') % |
|
1436 | 1436 | (len(state) - numrebased))) |
|
1437 | 1437 | |
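Given the format string above, an interrupted rebase shows up in `hg summary` roughly like this (counts invented for illustration):

    $ hg summary
    ...
    rebase: 3 rebased, 2 remaining (rebase --continue)

The broken-state message a few lines earlier is printed instead when the stored revisions can no longer be looked up.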
|
1438 | 1438 | def uisetup(ui): |
|
1439 | 1439 | #Replace pull with a decorator to provide --rebase option |
|
1440 | 1440 | entry = extensions.wrapcommand(commands.table, 'pull', pullrebase) |
|
1441 | 1441 | entry[1].append(('', 'rebase', None, |
|
1442 | 1442 | _("rebase working directory to branch head"))) |
|
1443 | 1443 | entry[1].append(('t', 'tool', '', |
|
1444 | 1444 | _("specify merge tool for rebase"))) |
|
1445 | 1445 | cmdutil.summaryhooks.add('rebase', summaryhook) |
|
1446 | 1446 | cmdutil.unfinishedstates.append( |
|
1447 | 1447 | ['rebasestate', False, False, _('rebase in progress'), |
|
1448 | 1448 | _("use 'hg rebase --continue' or 'hg rebase --abort'")]) |
|
1449 | 1449 | cmdutil.afterresolvedstates.append( |
|
1450 | 1450 | ['rebasestate', _('hg rebase --continue')]) |
|
1451 | 1451 | # ensure rebased revs are not hidden
|
1452 | 1452 | extensions.wrapfunction(repoview, '_getdynamicblockers', _rebasedvisible) |
|