@@ -1,748 +1,744
|
1 | 1 | # dirstate.py - working directory tracking for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | import errno |
|
8 | 8 | |
|
9 | 9 | from node import nullid |
|
10 | 10 | from i18n import _ |
|
11 | 11 | import scmutil, util, ignore, osutil, parsers, encoding |
|
12 | 12 | import struct, os, stat, errno |
|
13 | 13 | import cStringIO |
|
14 | 14 | |
|
15 | 15 | _format = ">cllll" |
|
16 | 16 | propertycache = util.propertycache |
|
17 | 17 | filecache = scmutil.filecache |
|
18 | 18 | |
|
19 | 19 | class repocache(filecache): |
|
20 | 20 | """filecache for files in .hg/""" |
|
21 | 21 | def join(self, obj, fname): |
|
22 | 22 | return obj._opener.join(fname) |
|
23 | 23 | |
|
24 | 24 | class rootcache(filecache): |
|
25 | 25 | """filecache for files in the repository root""" |
|
26 | 26 | def join(self, obj, fname): |
|
27 | 27 | return obj._join(fname) |
|
28 | 28 | |
|
29 | 29 | def _finddirs(path): |
|
30 | 30 | pos = path.rfind('/') |
|
31 | 31 | while pos != -1: |
|
32 | 32 | yield path[:pos] |
|
33 | 33 | pos = path.rfind('/', 0, pos) |
|
34 | 34 | |
|
35 | 35 | def _incdirs(dirs, path): |
|
36 | 36 | for base in _finddirs(path): |
|
37 | 37 | if base in dirs: |
|
38 | 38 | dirs[base] += 1 |
|
39 | 39 | return |
|
40 | 40 | dirs[base] = 1 |
|
41 | 41 | |
|
42 | 42 | def _decdirs(dirs, path): |
|
43 | 43 | for base in _finddirs(path): |
|
44 | 44 | if dirs[base] > 1: |
|
45 | 45 | dirs[base] -= 1 |
|
46 | 46 | return |
|
47 | 47 | del dirs[base] |
|
48 | 48 | |
|
49 | 49 | class dirstate(object): |
|
50 | 50 | |
|
51 | 51 | def __init__(self, opener, ui, root, validate): |
|
52 | 52 | '''Create a new dirstate object. |
|
53 | 53 | |
|
54 | 54 | opener is an open()-like callable that can be used to open the |
|
55 | 55 | dirstate file; root is the root of the directory tracked by |
|
56 | 56 | the dirstate. |
|
57 | 57 | ''' |
|
58 | 58 | self._opener = opener |
|
59 | 59 | self._validate = validate |
|
60 | 60 | self._root = root |
|
61 | 61 | self._rootdir = os.path.join(root, '') |
|
62 | 62 | self._dirty = False |
|
63 | 63 | self._dirtypl = False |
|
64 | 64 | self._lastnormaltime = 0 |
|
65 | 65 | self._ui = ui |
|
66 | 66 | self._filecache = {} |
|
67 | 67 | |
|
68 | 68 | @propertycache |
|
69 | 69 | def _map(self): |
|
70 | 70 | '''Return the dirstate contents as a map from filename to |
|
71 | 71 | (state, mode, size, time).''' |
|
72 | 72 | self._read() |
|
73 | 73 | return self._map |
|
74 | 74 | |
|
75 | 75 | @propertycache |
|
76 | 76 | def _copymap(self): |
|
77 | 77 | self._read() |
|
78 | 78 | return self._copymap |
|
79 | 79 | |
|
80 | 80 | @propertycache |
|
81 |    | def _normroot(self):

82 |    | return util.normcase(self._root)

83 |    |

84 |    | @propertycache
|
85 | 81 | def _foldmap(self): |
|
86 | 82 | f = {} |
|
87 | 83 | for name in self._map: |
|
88 | 84 | f[util.normcase(name)] = name |
|
89 | 85 | f['.'] = '.' # prevents useless util.fspath() invocation |
|
90 | 86 | return f |
|
91 | 87 | |
|
92 | 88 | @repocache('branch') |
|
93 | 89 | def _branch(self): |
|
94 | 90 | try: |
|
95 | 91 | return self._opener.read("branch").strip() or "default" |
|
96 | 92 | except IOError, inst: |
|
97 | 93 | if inst.errno != errno.ENOENT: |
|
98 | 94 | raise |
|
99 | 95 | return "default" |
|
100 | 96 | |
|
101 | 97 | @propertycache |
|
102 | 98 | def _pl(self): |
|
103 | 99 | try: |
|
104 | 100 | fp = self._opener("dirstate") |
|
105 | 101 | st = fp.read(40) |
|
106 | 102 | fp.close() |
|
107 | 103 | l = len(st) |
|
108 | 104 | if l == 40: |
|
109 | 105 | return st[:20], st[20:40] |
|
110 | 106 | elif l > 0 and l < 40: |
|
111 | 107 | raise util.Abort(_('working directory state appears damaged!')) |
|
112 | 108 | except IOError, err: |
|
113 | 109 | if err.errno != errno.ENOENT: |
|
114 | 110 | raise |
|
115 | 111 | return [nullid, nullid] |
|
116 | 112 | |
|
117 | 113 | @propertycache |
|
118 | 114 | def _dirs(self): |
|
119 | 115 | dirs = {} |
|
120 | 116 | for f, s in self._map.iteritems(): |
|
121 | 117 | if s[0] != 'r': |
|
122 | 118 | _incdirs(dirs, f) |
|
123 | 119 | return dirs |
|
124 | 120 | |
|
125 | 121 | def dirs(self): |
|
126 | 122 | return self._dirs |
|
127 | 123 | |
|
128 | 124 | @rootcache('.hgignore') |
|
129 | 125 | def _ignore(self): |
|
130 | 126 | files = [self._join('.hgignore')] |
|
131 | 127 | for name, path in self._ui.configitems("ui"): |
|
132 | 128 | if name == 'ignore' or name.startswith('ignore.'): |
|
133 | 129 | files.append(util.expandpath(path)) |
|
134 | 130 | return ignore.ignore(self._root, files, self._ui.warn) |
|
135 | 131 | |
|
136 | 132 | @propertycache |
|
137 | 133 | def _slash(self): |
|
138 | 134 | return self._ui.configbool('ui', 'slash') and os.sep != '/' |
|
139 | 135 | |
|
140 | 136 | @propertycache |
|
141 | 137 | def _checklink(self): |
|
142 | 138 | return util.checklink(self._root) |
|
143 | 139 | |
|
144 | 140 | @propertycache |
|
145 | 141 | def _checkexec(self): |
|
146 | 142 | return util.checkexec(self._root) |
|
147 | 143 | |
|
148 | 144 | @propertycache |
|
149 | 145 | def _checkcase(self): |
|
150 | 146 | return not util.checkcase(self._join('.hg')) |
|
151 | 147 | |
|
152 | 148 | def _join(self, f): |
|
153 | 149 | # much faster than os.path.join() |
|
154 | 150 | # it's safe because f is always a relative path |
|
155 | 151 | return self._rootdir + f |
|
156 | 152 | |
|
157 | 153 | def flagfunc(self, buildfallback): |
|
158 | 154 | if self._checklink and self._checkexec: |
|
159 | 155 | def f(x): |
|
160 | 156 | p = self._join(x) |
|
161 | 157 | if os.path.islink(p): |
|
162 | 158 | return 'l' |
|
163 | 159 | if util.isexec(p): |
|
164 | 160 | return 'x' |
|
165 | 161 | return '' |
|
166 | 162 | return f |
|
167 | 163 | |
|
168 | 164 | fallback = buildfallback() |
|
169 | 165 | if self._checklink: |
|
170 | 166 | def f(x): |
|
171 | 167 | if os.path.islink(self._join(x)): |
|
172 | 168 | return 'l' |
|
173 | 169 | if 'x' in fallback(x): |
|
174 | 170 | return 'x' |
|
175 | 171 | return '' |
|
176 | 172 | return f |
|
177 | 173 | if self._checkexec: |
|
178 | 174 | def f(x): |
|
179 | 175 | if 'l' in fallback(x): |
|
180 | 176 | return 'l' |
|
181 | 177 | if util.isexec(self._join(x)): |
|
182 | 178 | return 'x' |
|
183 | 179 | return '' |
|
184 | 180 | return f |
|
185 | 181 | else: |
|
186 | 182 | return fallback |
|
187 | 183 | |
|
188 | 184 | def getcwd(self): |
|
189 | 185 | cwd = os.getcwd() |
|
190 | 186 | if cwd == self._root: |
|
191 | 187 | return '' |
|
192 | 188 | # self._root ends with a path separator if self._root is '/' or 'C:\' |
|
193 | 189 | rootsep = self._root |
|
194 | 190 | if not util.endswithsep(rootsep): |
|
195 | 191 | rootsep += os.sep |
|
196 | 192 | if cwd.startswith(rootsep): |
|
197 | 193 | return cwd[len(rootsep):] |
|
198 | 194 | else: |
|
199 | 195 | # we're outside the repo. return an absolute path. |
|
200 | 196 | return cwd |
|
201 | 197 | |
|
202 | 198 | def pathto(self, f, cwd=None): |
|
203 | 199 | if cwd is None: |
|
204 | 200 | cwd = self.getcwd() |
|
205 | 201 | path = util.pathto(self._root, cwd, f) |
|
206 | 202 | if self._slash: |
|
207 | 203 | return util.normpath(path) |
|
208 | 204 | return path |
|
209 | 205 | |
|
210 | 206 | def __getitem__(self, key): |
|
211 | 207 | '''Return the current state of key (a filename) in the dirstate. |
|
212 | 208 | |
|
213 | 209 | States are: |
|
214 | 210 | n normal |
|
215 | 211 | m needs merging |
|
216 | 212 | r marked for removal |
|
217 | 213 | a marked for addition |
|
218 | 214 | ? not tracked |
|
219 | 215 | ''' |
|
220 | 216 | return self._map.get(key, ("?",))[0] |
|
221 | 217 | |
|
222 | 218 | def __contains__(self, key): |
|
223 | 219 | return key in self._map |
|
224 | 220 | |
|
225 | 221 | def __iter__(self): |
|
226 | 222 | for x in sorted(self._map): |
|
227 | 223 | yield x |
|
228 | 224 | |
|
229 | 225 | def parents(self): |
|
230 | 226 | return [self._validate(p) for p in self._pl] |
|
231 | 227 | |
|
232 | 228 | def p1(self): |
|
233 | 229 | return self._validate(self._pl[0]) |
|
234 | 230 | |
|
235 | 231 | def p2(self): |
|
236 | 232 | return self._validate(self._pl[1]) |
|
237 | 233 | |
|
238 | 234 | def branch(self): |
|
239 | 235 | return encoding.tolocal(self._branch) |
|
240 | 236 | |
|
241 | 237 | def setparents(self, p1, p2=nullid): |
|
242 | 238 | self._dirty = self._dirtypl = True |
|
243 | 239 | self._pl = p1, p2 |
|
244 | 240 | |
|
245 | 241 | def setbranch(self, branch): |
|
246 | 242 | if branch in ['tip', '.', 'null']: |
|
247 | 243 | raise util.Abort(_('the name \'%s\' is reserved') % branch) |
|
248 | 244 | self._branch = encoding.fromlocal(branch) |
|
249 | 245 | self._opener.write("branch", self._branch + '\n') |
|
250 | 246 | |
|
251 | 247 | def _read(self): |
|
252 | 248 | self._map = {} |
|
253 | 249 | self._copymap = {} |
|
254 | 250 | try: |
|
255 | 251 | st = self._opener.read("dirstate") |
|
256 | 252 | except IOError, err: |
|
257 | 253 | if err.errno != errno.ENOENT: |
|
258 | 254 | raise |
|
259 | 255 | return |
|
260 | 256 | if not st: |
|
261 | 257 | return |
|
262 | 258 | |
|
263 | 259 | p = parsers.parse_dirstate(self._map, self._copymap, st) |
|
264 | 260 | if not self._dirtypl: |
|
265 | 261 | self._pl = p |
|
266 | 262 | |
|
267 | 263 | def invalidate(self): |
|
268 | 264 | for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs", |
|
269 | 265 | "_ignore"): |
|
270 | 266 | if a in self.__dict__: |
|
271 | 267 | delattr(self, a) |
|
272 | 268 | self._lastnormaltime = 0 |
|
273 | 269 | self._dirty = False |
|
274 | 270 | |
|
275 | 271 | def copy(self, source, dest): |
|
276 | 272 | """Mark dest as a copy of source. Unmark dest if source is None.""" |
|
277 | 273 | if source == dest: |
|
278 | 274 | return |
|
279 | 275 | self._dirty = True |
|
280 | 276 | if source is not None: |
|
281 | 277 | self._copymap[dest] = source |
|
282 | 278 | elif dest in self._copymap: |
|
283 | 279 | del self._copymap[dest] |
|
284 | 280 | |
|
285 | 281 | def copied(self, file): |
|
286 | 282 | return self._copymap.get(file, None) |
|
287 | 283 | |
|
288 | 284 | def copies(self): |
|
289 | 285 | return self._copymap |
|
290 | 286 | |
|
291 | 287 | def _droppath(self, f): |
|
292 | 288 | if self[f] not in "?r" and "_dirs" in self.__dict__: |
|
293 | 289 | _decdirs(self._dirs, f) |
|
294 | 290 | |
|
295 | 291 | def _addpath(self, f, check=False): |
|
296 | 292 | oldstate = self[f] |
|
297 | 293 | if check or oldstate == "r": |
|
298 | 294 | scmutil.checkfilename(f) |
|
299 | 295 | if f in self._dirs: |
|
300 | 296 | raise util.Abort(_('directory %r already in dirstate') % f) |
|
301 | 297 | # shadows |
|
302 | 298 | for d in _finddirs(f): |
|
303 | 299 | if d in self._dirs: |
|
304 | 300 | break |
|
305 | 301 | if d in self._map and self[d] != 'r': |
|
306 | 302 | raise util.Abort( |
|
307 | 303 | _('file %r in dirstate clashes with %r') % (d, f)) |
|
308 | 304 | if oldstate in "?r" and "_dirs" in self.__dict__: |
|
309 | 305 | _incdirs(self._dirs, f) |
|
310 | 306 | |
|
311 | 307 | def normal(self, f): |
|
312 | 308 | '''Mark a file normal and clean.''' |
|
313 | 309 | self._dirty = True |
|
314 | 310 | self._addpath(f) |
|
315 | 311 | s = os.lstat(self._join(f)) |
|
316 | 312 | mtime = int(s.st_mtime) |
|
317 | 313 | self._map[f] = ('n', s.st_mode, s.st_size, mtime) |
|
318 | 314 | if f in self._copymap: |
|
319 | 315 | del self._copymap[f] |
|
320 | 316 | if mtime > self._lastnormaltime: |
|
321 | 317 | # Remember the most recent modification timeslot for status(), |
|
322 | 318 | # to make sure we won't miss future size-preserving file content |
|
323 | 319 | # modifications that happen within the same timeslot. |
|
324 | 320 | self._lastnormaltime = mtime |
|
325 | 321 | |
|
326 | 322 | def normallookup(self, f): |
|
327 | 323 | '''Mark a file normal, but possibly dirty.''' |
|
328 | 324 | if self._pl[1] != nullid and f in self._map: |
|
329 | 325 | # if there is a merge going on and the file was either |
|
330 | 326 | # in state 'm' (-1) or coming from other parent (-2) before |
|
331 | 327 | # being removed, restore that state. |
|
332 | 328 | entry = self._map[f] |
|
333 | 329 | if entry[0] == 'r' and entry[2] in (-1, -2): |
|
334 | 330 | source = self._copymap.get(f) |
|
335 | 331 | if entry[2] == -1: |
|
336 | 332 | self.merge(f) |
|
337 | 333 | elif entry[2] == -2: |
|
338 | 334 | self.otherparent(f) |
|
339 | 335 | if source: |
|
340 | 336 | self.copy(source, f) |
|
341 | 337 | return |
|
342 | 338 | if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2: |
|
343 | 339 | return |
|
344 | 340 | self._dirty = True |
|
345 | 341 | self._addpath(f) |
|
346 | 342 | self._map[f] = ('n', 0, -1, -1) |
|
347 | 343 | if f in self._copymap: |
|
348 | 344 | del self._copymap[f] |
|
349 | 345 | |
|
350 | 346 | def otherparent(self, f): |
|
351 | 347 | '''Mark as coming from the other parent, always dirty.''' |
|
352 | 348 | if self._pl[1] == nullid: |
|
353 | 349 | raise util.Abort(_("setting %r to other parent " |
|
354 | 350 | "only allowed in merges") % f) |
|
355 | 351 | self._dirty = True |
|
356 | 352 | self._addpath(f) |
|
357 | 353 | self._map[f] = ('n', 0, -2, -1) |
|
358 | 354 | if f in self._copymap: |
|
359 | 355 | del self._copymap[f] |
|
360 | 356 | |
|
361 | 357 | def add(self, f): |
|
362 | 358 | '''Mark a file added.''' |
|
363 | 359 | self._dirty = True |
|
364 | 360 | self._addpath(f, True) |
|
365 | 361 | self._map[f] = ('a', 0, -1, -1) |
|
366 | 362 | if f in self._copymap: |
|
367 | 363 | del self._copymap[f] |
|
368 | 364 | |
|
369 | 365 | def remove(self, f): |
|
370 | 366 | '''Mark a file removed.''' |
|
371 | 367 | self._dirty = True |
|
372 | 368 | self._droppath(f) |
|
373 | 369 | size = 0 |
|
374 | 370 | if self._pl[1] != nullid and f in self._map: |
|
375 | 371 | # backup the previous state |
|
376 | 372 | entry = self._map[f] |
|
377 | 373 | if entry[0] == 'm': # merge |
|
378 | 374 | size = -1 |
|
379 | 375 | elif entry[0] == 'n' and entry[2] == -2: # other parent |
|
380 | 376 | size = -2 |
|
381 | 377 | self._map[f] = ('r', 0, size, 0) |
|
382 | 378 | if size == 0 and f in self._copymap: |
|
383 | 379 | del self._copymap[f] |
|
384 | 380 | |
|
385 | 381 | def merge(self, f): |
|
386 | 382 | '''Mark a file merged.''' |
|
387 | 383 | self._dirty = True |
|
388 | 384 | s = os.lstat(self._join(f)) |
|
389 | 385 | self._addpath(f) |
|
390 | 386 | self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime)) |
|
391 | 387 | if f in self._copymap: |
|
392 | 388 | del self._copymap[f] |
|
393 | 389 | |
|
394 | 390 | def drop(self, f): |
|
395 | 391 | '''Drop a file from the dirstate''' |
|
396 | 392 | if f in self._map: |
|
397 | 393 | self._dirty = True |
|
398 | 394 | self._droppath(f) |
|
399 | 395 | del self._map[f] |
|
400 | 396 | |
|
401 | 397 | def _normalize(self, path, isknown): |
|
402 | 398 | normed = util.normcase(path) |
|
403 | 399 | folded = self._foldmap.get(normed, None) |
|
404 | 400 | if folded is None: |
|
405 | 401 | if isknown or not os.path.lexists(os.path.join(self._root, path)): |
|
406 | 402 | folded = path |
|
407 | 403 | else: |
|
408 | 404 | folded = self._foldmap.setdefault(normed, |
|
409 |     | util.fspath(normed, self._normroot))

    | 405 | util.fspath(normed, self._root))
|
410 | 406 | return folded |
|
411 | 407 | |
|
412 | 408 | def normalize(self, path, isknown=False): |
|
413 | 409 | ''' |
|
414 | 410 | normalize the case of a pathname when on a casefolding filesystem |
|
415 | 411 | |
|
416 | 412 | isknown specifies whether the filename came from walking the |
|
417 | 413 | disk, to avoid extra filesystem access |
|
418 | 414 | |
|
419 | 415 | The normalized case is determined based on the following precedence: |
|
420 | 416 | |
|
421 | 417 | - version of name already stored in the dirstate |
|
422 | 418 | - version of name stored on disk |
|
423 | 419 | - version provided via command arguments |
|
424 | 420 | ''' |
|
425 | 421 | |
|
426 | 422 | if self._checkcase: |
|
427 | 423 | return self._normalize(path, isknown) |
|
428 | 424 | return path |
|
429 | 425 | |
|
430 | 426 | def clear(self): |
|
431 | 427 | self._map = {} |
|
432 | 428 | if "_dirs" in self.__dict__: |
|
433 | 429 | delattr(self, "_dirs") |
|
434 | 430 | self._copymap = {} |
|
435 | 431 | self._pl = [nullid, nullid] |
|
436 | 432 | self._lastnormaltime = 0 |
|
437 | 433 | self._dirty = True |
|
438 | 434 | |
|
439 | 435 | def rebuild(self, parent, files): |
|
440 | 436 | self.clear() |
|
441 | 437 | for f in files: |
|
442 | 438 | if 'x' in files.flags(f): |
|
443 | 439 | self._map[f] = ('n', 0777, -1, 0) |
|
444 | 440 | else: |
|
445 | 441 | self._map[f] = ('n', 0666, -1, 0) |
|
446 | 442 | self._pl = (parent, nullid) |
|
447 | 443 | self._dirty = True |
|
448 | 444 | |
|
449 | 445 | def write(self): |
|
450 | 446 | if not self._dirty: |
|
451 | 447 | return |
|
452 | 448 | st = self._opener("dirstate", "w", atomictemp=True) |
|
453 | 449 | |
|
454 | 450 | # use the modification time of the newly created temporary file as the |
|
455 | 451 | # filesystem's notion of 'now' |
|
456 | 452 | now = int(util.fstat(st).st_mtime) |
|
457 | 453 | |
|
458 | 454 | cs = cStringIO.StringIO() |
|
459 | 455 | copymap = self._copymap |
|
460 | 456 | pack = struct.pack |
|
461 | 457 | write = cs.write |
|
462 | 458 | write("".join(self._pl)) |
|
463 | 459 | for f, e in self._map.iteritems(): |
|
464 | 460 | if e[0] == 'n' and e[3] == now: |
|
465 | 461 | # The file was last modified "simultaneously" with the current |
|
466 | 462 | # write to dirstate (i.e. within the same second for file- |
|
467 | 463 | # systems with a granularity of 1 sec). This commonly happens |
|
468 | 464 | # for at least a couple of files on 'update'. |
|
469 | 465 | # The user could change the file without changing its size |
|
470 | 466 | # within the same second. Invalidate the file's stat data in |
|
471 | 467 | # dirstate, forcing future 'status' calls to compare the |
|
472 | 468 | # contents of the file. This prevents mistakenly treating such |
|
473 | 469 | # files as clean. |
|
474 | 470 | e = (e[0], 0, -1, -1) # mark entry as 'unset' |
|
475 | 471 | self._map[f] = e |
|
476 | 472 | |
|
477 | 473 | if f in copymap: |
|
478 | 474 | f = "%s\0%s" % (f, copymap[f]) |
|
479 | 475 | e = pack(_format, e[0], e[1], e[2], e[3], len(f)) |
|
480 | 476 | write(e) |
|
481 | 477 | write(f) |
|
482 | 478 | st.write(cs.getvalue()) |
|
483 | 479 | st.close() |
|
484 | 480 | self._lastnormaltime = 0 |
|
485 | 481 | self._dirty = self._dirtypl = False |
|
486 | 482 | |
|
487 | 483 | def _dirignore(self, f): |
|
488 | 484 | if f == '.': |
|
489 | 485 | return False |
|
490 | 486 | if self._ignore(f): |
|
491 | 487 | return True |
|
492 | 488 | for p in _finddirs(f): |
|
493 | 489 | if self._ignore(p): |
|
494 | 490 | return True |
|
495 | 491 | return False |
|
496 | 492 | |
|
497 | 493 | def walk(self, match, subrepos, unknown, ignored): |
|
498 | 494 | ''' |
|
499 | 495 | Walk recursively through the directory tree, finding all files |
|
500 | 496 | matched by match. |
|
501 | 497 | |
|
502 | 498 | Return a dict mapping filename to stat-like object (either |
|
503 | 499 | mercurial.osutil.stat instance or return value of os.stat()). |
|
504 | 500 | ''' |
|
505 | 501 | |
|
506 | 502 | def fwarn(f, msg): |
|
507 | 503 | self._ui.warn('%s: %s\n' % (self.pathto(f), msg)) |
|
508 | 504 | return False |
|
509 | 505 | |
|
510 | 506 | def badtype(mode): |
|
511 | 507 | kind = _('unknown') |
|
512 | 508 | if stat.S_ISCHR(mode): |
|
513 | 509 | kind = _('character device') |
|
514 | 510 | elif stat.S_ISBLK(mode): |
|
515 | 511 | kind = _('block device') |
|
516 | 512 | elif stat.S_ISFIFO(mode): |
|
517 | 513 | kind = _('fifo') |
|
518 | 514 | elif stat.S_ISSOCK(mode): |
|
519 | 515 | kind = _('socket') |
|
520 | 516 | elif stat.S_ISDIR(mode): |
|
521 | 517 | kind = _('directory') |
|
522 | 518 | return _('unsupported file type (type is %s)') % kind |
|
523 | 519 | |
|
524 | 520 | ignore = self._ignore |
|
525 | 521 | dirignore = self._dirignore |
|
526 | 522 | if ignored: |
|
527 | 523 | ignore = util.never |
|
528 | 524 | dirignore = util.never |
|
529 | 525 | elif not unknown: |
|
530 | 526 | # if unknown and ignored are False, skip step 2 |
|
531 | 527 | ignore = util.always |
|
532 | 528 | dirignore = util.always |
|
533 | 529 | |
|
534 | 530 | matchfn = match.matchfn |
|
535 | 531 | badfn = match.bad |
|
536 | 532 | dmap = self._map |
|
537 | 533 | normpath = util.normpath |
|
538 | 534 | listdir = osutil.listdir |
|
539 | 535 | lstat = os.lstat |
|
540 | 536 | getkind = stat.S_IFMT |
|
541 | 537 | dirkind = stat.S_IFDIR |
|
542 | 538 | regkind = stat.S_IFREG |
|
543 | 539 | lnkkind = stat.S_IFLNK |
|
544 | 540 | join = self._join |
|
545 | 541 | work = [] |
|
546 | 542 | wadd = work.append |
|
547 | 543 | |
|
548 | 544 | exact = skipstep3 = False |
|
549 | 545 | if matchfn == match.exact: # match.exact |
|
550 | 546 | exact = True |
|
551 | 547 | dirignore = util.always # skip step 2 |
|
552 | 548 | elif match.files() and not match.anypats(): # match.match, no patterns |
|
553 | 549 | skipstep3 = True |
|
554 | 550 | |
|
555 | 551 | if self._checkcase: |
|
556 | 552 | normalize = self._normalize |
|
557 | 553 | skipstep3 = False |
|
558 | 554 | else: |
|
559 | 555 | normalize = lambda x, y: x |
|
560 | 556 | |
|
561 | 557 | files = sorted(match.files()) |
|
562 | 558 | subrepos.sort() |
|
563 | 559 | i, j = 0, 0 |
|
564 | 560 | while i < len(files) and j < len(subrepos): |
|
565 | 561 | subpath = subrepos[j] + "/" |
|
566 | 562 | if files[i] < subpath: |
|
567 | 563 | i += 1 |
|
568 | 564 | continue |
|
569 | 565 | while i < len(files) and files[i].startswith(subpath): |
|
570 | 566 | del files[i] |
|
571 | 567 | j += 1 |
|
572 | 568 | |
|
573 | 569 | if not files or '.' in files: |
|
574 | 570 | files = [''] |
|
575 | 571 | results = dict.fromkeys(subrepos) |
|
576 | 572 | results['.hg'] = None |
|
577 | 573 | |
|
578 | 574 | # step 1: find all explicit files |
|
579 | 575 | for ff in files: |
|
580 | 576 | nf = normalize(normpath(ff), False) |
|
581 | 577 | if nf in results: |
|
582 | 578 | continue |
|
583 | 579 | |
|
584 | 580 | try: |
|
585 | 581 | st = lstat(join(nf)) |
|
586 | 582 | kind = getkind(st.st_mode) |
|
587 | 583 | if kind == dirkind: |
|
588 | 584 | skipstep3 = False |
|
589 | 585 | if nf in dmap: |
|
590 | 586 | #file deleted on disk but still in dirstate |
|
591 | 587 | results[nf] = None |
|
592 | 588 | match.dir(nf) |
|
593 | 589 | if not dirignore(nf): |
|
594 | 590 | wadd(nf) |
|
595 | 591 | elif kind == regkind or kind == lnkkind: |
|
596 | 592 | results[nf] = st |
|
597 | 593 | else: |
|
598 | 594 | badfn(ff, badtype(kind)) |
|
599 | 595 | if nf in dmap: |
|
600 | 596 | results[nf] = None |
|
601 | 597 | except OSError, inst: |
|
602 | 598 | if nf in dmap: # does it exactly match a file? |
|
603 | 599 | results[nf] = None |
|
604 | 600 | else: # does it match a directory? |
|
605 | 601 | prefix = nf + "/" |
|
606 | 602 | for fn in dmap: |
|
607 | 603 | if fn.startswith(prefix): |
|
608 | 604 | match.dir(nf) |
|
609 | 605 | skipstep3 = False |
|
610 | 606 | break |
|
611 | 607 | else: |
|
612 | 608 | badfn(ff, inst.strerror) |
|
613 | 609 | |
|
614 | 610 | # step 2: visit subdirectories |
|
615 | 611 | while work: |
|
616 | 612 | nd = work.pop() |
|
617 | 613 | skip = None |
|
618 | 614 | if nd == '.': |
|
619 | 615 | nd = '' |
|
620 | 616 | else: |
|
621 | 617 | skip = '.hg' |
|
622 | 618 | try: |
|
623 | 619 | entries = listdir(join(nd), stat=True, skip=skip) |
|
624 | 620 | except OSError, inst: |
|
625 | 621 | if inst.errno == errno.EACCES: |
|
626 | 622 | fwarn(nd, inst.strerror) |
|
627 | 623 | continue |
|
628 | 624 | raise |
|
629 | 625 | for f, kind, st in entries: |
|
630 | 626 | nf = normalize(nd and (nd + "/" + f) or f, True) |
|
631 | 627 | if nf not in results: |
|
632 | 628 | if kind == dirkind: |
|
633 | 629 | if not ignore(nf): |
|
634 | 630 | match.dir(nf) |
|
635 | 631 | wadd(nf) |
|
636 | 632 | if nf in dmap and matchfn(nf): |
|
637 | 633 | results[nf] = None |
|
638 | 634 | elif kind == regkind or kind == lnkkind: |
|
639 | 635 | if nf in dmap: |
|
640 | 636 | if matchfn(nf): |
|
641 | 637 | results[nf] = st |
|
642 | 638 | elif matchfn(nf) and not ignore(nf): |
|
643 | 639 | results[nf] = st |
|
644 | 640 | elif nf in dmap and matchfn(nf): |
|
645 | 641 | results[nf] = None |
|
646 | 642 | |
|
647 | 643 | # step 3: report unseen items in the dmap hash |
|
648 | 644 | if not skipstep3 and not exact: |
|
649 | 645 | visit = sorted([f for f in dmap if f not in results and matchfn(f)]) |
|
650 | 646 | for nf, st in zip(visit, util.statfiles([join(i) for i in visit])): |
|
651 | 647 | if not st is None and not getkind(st.st_mode) in (regkind, lnkkind): |
|
652 | 648 | st = None |
|
653 | 649 | results[nf] = st |
|
654 | 650 | for s in subrepos: |
|
655 | 651 | del results[s] |
|
656 | 652 | del results['.hg'] |
|
657 | 653 | return results |
|
658 | 654 | |
|
659 | 655 | def status(self, match, subrepos, ignored, clean, unknown): |
|
660 | 656 | '''Determine the status of the working copy relative to the |
|
661 | 657 | dirstate and return a tuple of lists (unsure, modified, added, |
|
662 | 658 | removed, deleted, unknown, ignored, clean), where: |
|
663 | 659 | |
|
664 | 660 | unsure: |
|
665 | 661 | files that might have been modified since the dirstate was |
|
666 | 662 | written, but need to be read to be sure (size is the same |
|
667 | 663 | but mtime differs) |
|
668 | 664 | modified: |
|
669 | 665 | files that have definitely been modified since the dirstate |
|
670 | 666 | was written (different size or mode) |
|
671 | 667 | added: |
|
672 | 668 | files that have been explicitly added with hg add |
|
673 | 669 | removed: |
|
674 | 670 | files that have been explicitly removed with hg remove |
|
675 | 671 | deleted: |
|
676 | 672 | files that have been deleted through other means ("missing") |
|
677 | 673 | unknown: |
|
678 | 674 | files not in the dirstate that are not ignored |
|
679 | 675 | ignored: |
|
680 | 676 | files not in the dirstate that are ignored |
|
681 | 677 | (by _dirignore()) |
|
682 | 678 | clean: |
|
683 | 679 | files that have definitely not been modified since the |
|
684 | 680 | dirstate was written |
|
685 | 681 | ''' |
|
686 | 682 | listignored, listclean, listunknown = ignored, clean, unknown |
|
687 | 683 | lookup, modified, added, unknown, ignored = [], [], [], [], [] |
|
688 | 684 | removed, deleted, clean = [], [], [] |
|
689 | 685 | |
|
690 | 686 | dmap = self._map |
|
691 | 687 | ladd = lookup.append # aka "unsure" |
|
692 | 688 | madd = modified.append |
|
693 | 689 | aadd = added.append |
|
694 | 690 | uadd = unknown.append |
|
695 | 691 | iadd = ignored.append |
|
696 | 692 | radd = removed.append |
|
697 | 693 | dadd = deleted.append |
|
698 | 694 | cadd = clean.append |
|
699 | 695 | |
|
700 | 696 | lnkkind = stat.S_IFLNK |
|
701 | 697 | |
|
702 | 698 | for fn, st in self.walk(match, subrepos, listunknown, |
|
703 | 699 | listignored).iteritems(): |
|
704 | 700 | if fn not in dmap: |
|
705 | 701 | if (listignored or match.exact(fn)) and self._dirignore(fn): |
|
706 | 702 | if listignored: |
|
707 | 703 | iadd(fn) |
|
708 | 704 | elif listunknown: |
|
709 | 705 | uadd(fn) |
|
710 | 706 | continue |
|
711 | 707 | |
|
712 | 708 | state, mode, size, time = dmap[fn] |
|
713 | 709 | |
|
714 | 710 | if not st and state in "nma": |
|
715 | 711 | dadd(fn) |
|
716 | 712 | elif state == 'n': |
|
717 | 713 | # The "mode & lnkkind != lnkkind or self._checklink" |
|
718 | 714 | # lines are an expansion of "islink => checklink" |
|
719 | 715 | # where islink means "is this a link?" and checklink |
|
720 | 716 | # means "can we check links?". |
|
721 | 717 | mtime = int(st.st_mtime) |
|
722 | 718 | if (size >= 0 and |
|
723 | 719 | (size != st.st_size |
|
724 | 720 | or ((mode ^ st.st_mode) & 0100 and self._checkexec)) |
|
725 | 721 | and (mode & lnkkind != lnkkind or self._checklink) |
|
726 | 722 | or size == -2 # other parent |
|
727 | 723 | or fn in self._copymap): |
|
728 | 724 | madd(fn) |
|
729 | 725 | elif (mtime != time |
|
730 | 726 | and (mode & lnkkind != lnkkind or self._checklink)): |
|
731 | 727 | ladd(fn) |
|
732 | 728 | elif mtime == self._lastnormaltime: |
|
733 | 729 | # fn may have been changed in the same timeslot without |
|
734 | 730 | # changing its size. This can happen if we quickly do |
|
735 | 731 | # multiple commits in a single transaction. |
|
736 | 732 | # Force lookup, so we don't miss such a racy file change. |
|
737 | 733 | ladd(fn) |
|
738 | 734 | elif listclean: |
|
739 | 735 | cadd(fn) |
|
740 | 736 | elif state == 'm': |
|
741 | 737 | madd(fn) |
|
742 | 738 | elif state == 'a': |
|
743 | 739 | aadd(fn) |
|
744 | 740 | elif state == 'r': |
|
745 | 741 | radd(fn) |
|
746 | 742 | |
|
747 | 743 | return (lookup, modified, added, removed, deleted, unknown, ignored, |
|
748 | 744 | clean) |
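
As a reading aid for the write() method in the dirstate.py hunk above, here is a minimal sketch of how a single dirstate record is laid out with _format = ">cllll": a state byte, then mode, size, mtime and the filename length as big-endian longs, followed by the filename itself, which may carry a NUL-joined copy source. This is a simplified illustration, not code from this changeset; the real decoding is done by parsers.parse_dirstate.

import struct

_format = ">cllll"   # state, mode, size, mtime, filename length

def pack_entry(state, mode, size, mtime, fname, copysource=None):
    # Mirrors what write() does for one file: a copy source, if any,
    # rides along in the same field, separated from the name by "\0".
    if copysource:
        fname = "%s\0%s" % (fname, copysource)
    return struct.pack(_format, state, mode, size, mtime, len(fname)) + fname

def unpack_entry(data, offset=0):
    # Rough inverse, for illustration only.
    hlen = struct.calcsize(_format)
    state, mode, size, mtime, flen = struct.unpack_from(_format, data, offset)
    fname = data[offset + hlen:offset + hlen + flen]
    copysource = None
    if "\0" in fname:
        fname, copysource = fname.split("\0", 1)
    return fname, (state, mode, size, mtime), copysource, offset + hlen + flen

# e.g. pack_entry('n', 0644, 12, 1325376000, 'foo/bar.txt')
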
@@ -1,594 +1,594
|
1 | 1 | # merge.py - directory-level update/merge handling for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from node import nullid, nullrev, hex, bin |
|
9 | 9 | from i18n import _ |
|
10 | 10 | import scmutil, util, filemerge, copies, subrepo |
|
11 | 11 | import errno, os, shutil |
|
12 | 12 | |
|
13 | 13 | class mergestate(object): |
|
14 | 14 | '''track 3-way merge state of individual files''' |
|
15 | 15 | def __init__(self, repo): |
|
16 | 16 | self._repo = repo |
|
17 | 17 | self._dirty = False |
|
18 | 18 | self._read() |
|
19 | 19 | def reset(self, node=None): |
|
20 | 20 | self._state = {} |
|
21 | 21 | if node: |
|
22 | 22 | self._local = node |
|
23 | 23 | shutil.rmtree(self._repo.join("merge"), True) |
|
24 | 24 | self._dirty = False |
|
25 | 25 | def _read(self): |
|
26 | 26 | self._state = {} |
|
27 | 27 | try: |
|
28 | 28 | f = self._repo.opener("merge/state") |
|
29 | 29 | for i, l in enumerate(f): |
|
30 | 30 | if i == 0: |
|
31 | 31 | self._local = bin(l[:-1]) |
|
32 | 32 | else: |
|
33 | 33 | bits = l[:-1].split("\0") |
|
34 | 34 | self._state[bits[0]] = bits[1:] |
|
35 | 35 | f.close() |
|
36 | 36 | except IOError, err: |
|
37 | 37 | if err.errno != errno.ENOENT: |
|
38 | 38 | raise |
|
39 | 39 | self._dirty = False |
|
40 | 40 | def commit(self): |
|
41 | 41 | if self._dirty: |
|
42 | 42 | f = self._repo.opener("merge/state", "w") |
|
43 | 43 | f.write(hex(self._local) + "\n") |
|
44 | 44 | for d, v in self._state.iteritems(): |
|
45 | 45 | f.write("\0".join([d] + v) + "\n") |
|
46 | 46 | f.close() |
|
47 | 47 | self._dirty = False |
|
48 | 48 | def add(self, fcl, fco, fca, fd, flags): |
|
49 | 49 | hash = util.sha1(fcl.path()).hexdigest() |
|
50 | 50 | self._repo.opener.write("merge/" + hash, fcl.data()) |
|
51 | 51 | self._state[fd] = ['u', hash, fcl.path(), fca.path(), |
|
52 | 52 | hex(fca.filenode()), fco.path(), flags] |
|
53 | 53 | self._dirty = True |
|
54 | 54 | def __contains__(self, dfile): |
|
55 | 55 | return dfile in self._state |
|
56 | 56 | def __getitem__(self, dfile): |
|
57 | 57 | return self._state[dfile][0] |
|
58 | 58 | def __iter__(self): |
|
59 | 59 | l = self._state.keys() |
|
60 | 60 | l.sort() |
|
61 | 61 | for f in l: |
|
62 | 62 | yield f |
|
63 | 63 | def mark(self, dfile, state): |
|
64 | 64 | self._state[dfile][0] = state |
|
65 | 65 | self._dirty = True |
|
66 | 66 | def resolve(self, dfile, wctx, octx): |
|
67 | 67 | if self[dfile] == 'r': |
|
68 | 68 | return 0 |
|
69 | 69 | state, hash, lfile, afile, anode, ofile, flags = self._state[dfile] |
|
70 | 70 | f = self._repo.opener("merge/" + hash) |
|
71 | 71 | self._repo.wwrite(dfile, f.read(), flags) |
|
72 | 72 | f.close() |
|
73 | 73 | fcd = wctx[dfile] |
|
74 | 74 | fco = octx[ofile] |
|
75 | 75 | fca = self._repo.filectx(afile, fileid=anode) |
|
76 | 76 | r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca) |
|
77 | 77 | if r is None: |
|
78 | 78 | # no real conflict |
|
79 | 79 | del self._state[dfile] |
|
80 | 80 | elif not r: |
|
81 | 81 | self.mark(dfile, 'r') |
|
82 | 82 | return r |
|
83 | 83 | |
|
84 | 84 | def _checkunknownfile(repo, wctx, mctx, f): |
|
85 | 85 | return (not repo.dirstate._ignore(f) |
|
86 | 86 | and os.path.exists(repo.wjoin(f)) |
|
87 | 87 | and mctx[f].cmp(wctx[f])) |
|
88 | 88 | |
|
89 | 89 | def _checkunknown(repo, wctx, mctx): |
|
90 | 90 | "check for collisions between unknown files and files in mctx" |
|
91 | 91 | |
|
92 | 92 | error = False |
|
93 | 93 | for f in mctx: |
|
94 | 94 | if f not in wctx and _checkunknownfile(repo, wctx, mctx, f): |
|
95 | 95 | error = True |
|
96 | 96 | wctx._repo.ui.warn(_("%s: untracked file differs\n") % f) |
|
97 | 97 | if error: |
|
98 | 98 | raise util.Abort(_("untracked files in working directory differ " |
|
99 | 99 | "from files in requested revision")) |
|
100 | 100 | |
|
101 | 101 | def _checkcollision(mctx, wctx): |
|
102 | 102 | "check for case folding collisions in the destination context" |
|
103 | 103 | folded = {} |
|
104 | 104 | for fn in mctx: |
|
105 | 105 | fold = util.normcase(fn) |
|
106 | 106 | if fold in folded: |
|
107 | 107 | raise util.Abort(_("case-folding collision between %s and %s") |
|
108 | 108 | % (fn, folded[fold])) |
|
109 | 109 | folded[fold] = fn |
|
110 | 110 | |
|
111 | 111 | if wctx: |
|
112 | 112 | for fn in wctx: |
|
113 | 113 | fold = util.normcase(fn) |
|
114 | 114 | mfn = folded.get(fold, None) |
|
115 | 115 | if mfn and (mfn != fn): |
|
116 | 116 | raise util.Abort(_("case-folding collision between %s and %s") |
|
117 | 117 | % (mfn, fn)) |
|
118 | 118 | |
|
119 | 119 | def _forgetremoved(wctx, mctx, branchmerge): |
|
120 | 120 | """ |
|
121 | 121 | Forget removed files |
|
122 | 122 | |
|
123 | 123 | If we're jumping between revisions (as opposed to merging), and if |
|
124 | 124 | neither the working directory nor the target rev has the file, |
|
125 | 125 | then we need to remove it from the dirstate, to prevent the |
|
126 | 126 | dirstate from listing the file when it is no longer in the |
|
127 | 127 | manifest. |
|
128 | 128 | |
|
129 | 129 | If we're merging, and the other revision has removed a file |
|
130 | 130 | that is not present in the working directory, we need to mark it |
|
131 | 131 | as removed. |
|
132 | 132 | """ |
|
133 | 133 | |
|
134 | 134 | action = [] |
|
135 | 135 | state = branchmerge and 'r' or 'f' |
|
136 | 136 | for f in wctx.deleted(): |
|
137 | 137 | if f not in mctx: |
|
138 | 138 | action.append((f, state)) |
|
139 | 139 | |
|
140 | 140 | if not branchmerge: |
|
141 | 141 | for f in wctx.removed(): |
|
142 | 142 | if f not in mctx: |
|
143 | 143 | action.append((f, "f")) |
|
144 | 144 | |
|
145 | 145 | return action |
|
146 | 146 | |
|
147 | 147 | def manifestmerge(repo, p1, p2, pa, overwrite, partial): |
|
148 | 148 | """ |
|
149 | 149 | Merge p1 and p2 with ancestor pa and generate merge action list |
|
150 | 150 | |
|
151 | 151 | overwrite = whether we clobber working files |
|
152 | 152 | partial = function to filter file lists |
|
153 | 153 | """ |
|
154 | 154 | |
|
155 | 155 | def fmerge(f, f2, fa): |
|
156 | 156 | """merge flags""" |
|
157 | 157 | a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2) |
|
158 | 158 | if m == n: # flags agree |
|
159 | 159 | return m # unchanged |
|
160 | 160 | if m and n and not a: # flags set, don't agree, differ from parent |
|
161 | 161 | r = repo.ui.promptchoice( |
|
162 | 162 | _(" conflicting flags for %s\n" |
|
163 | 163 | "(n)one, e(x)ec or sym(l)ink?") % f, |
|
164 | 164 | (_("&None"), _("E&xec"), _("Sym&link")), 0) |
|
165 | 165 | if r == 1: |
|
166 | 166 | return "x" # Exec |
|
167 | 167 | if r == 2: |
|
168 | 168 | return "l" # Symlink |
|
169 | 169 | return "" |
|
170 | 170 | if m and m != a: # changed from a to m |
|
171 | 171 | return m |
|
172 | 172 | if n and n != a: # changed from a to n |
|
173 |     | if (n == 'l' or a == 'l') and m1

    | 173 | if (n == 'l' or a == 'l') and m1.get(f) != ma.get(f):
|
174 | 174 | # can't automatically merge symlink flag when there |
|
175 | 175 | # are file-level conflicts here, let filemerge take |
|
176 | 176 | # care of it |
|
177 | 177 | return m |
|
178 | 178 | return n |
|
179 | 179 | return '' # flag was cleared |
|
180 | 180 | |
|
181 | 181 | def act(msg, m, f, *args): |
|
182 | 182 | repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m)) |
|
183 | 183 | action.append((f, m) + args) |
|
184 | 184 | |
|
185 | 185 | action, copy = [], {} |
|
186 | 186 | |
|
187 | 187 | if overwrite: |
|
188 | 188 | pa = p1 |
|
189 | 189 | elif pa == p2: # backwards |
|
190 | 190 | pa = p1.p1() |
|
191 | 191 | elif pa and repo.ui.configbool("merge", "followcopies", True): |
|
192 | 192 | copy, diverge = copies.mergecopies(repo, p1, p2, pa) |
|
193 | 193 | for of, fl in diverge.iteritems(): |
|
194 | 194 | act("divergent renames", "dr", of, fl) |
|
195 | 195 | |
|
196 | 196 | repo.ui.note(_("resolving manifests\n")) |
|
197 | 197 | repo.ui.debug(" overwrite: %s, partial: %s\n" |
|
198 | 198 | % (bool(overwrite), bool(partial))) |
|
199 | 199 | repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, p1, p2)) |
|
200 | 200 | |
|
201 | 201 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() |
|
202 | 202 | copied = set(copy.values()) |
|
203 | 203 | |
|
204 | 204 | if '.hgsubstate' in m1: |
|
205 | 205 | # check whether sub state is modified |
|
206 | 206 | for s in p1.substate: |
|
207 | 207 | if p1.sub(s).dirty(): |
|
208 | 208 | m1['.hgsubstate'] += "+" |
|
209 | 209 | break |
|
210 | 210 | |
|
211 | 211 | # Compare manifests |
|
212 | 212 | for f, n in m1.iteritems(): |
|
213 | 213 | if partial and not partial(f): |
|
214 | 214 | continue |
|
215 | 215 | if f in m2: |
|
216 | 216 | rflags = fmerge(f, f, f) |
|
217 | 217 | a = ma.get(f, nullid) |
|
218 | 218 | if n == m2[f] or m2[f] == a: # same or local newer |
|
219 | 219 | # is file locally modified or flags need changing? |
|
220 | 220 | # dirstate flags may need to be made current |
|
221 | 221 | if m1.flags(f) != rflags or n[20:]: |
|
222 | 222 | act("update permissions", "e", f, rflags) |
|
223 | 223 | elif n == a: # remote newer |
|
224 | 224 | act("remote is newer", "g", f, rflags) |
|
225 | 225 | else: # both changed |
|
226 | 226 | act("versions differ", "m", f, f, f, rflags, False) |
|
227 | 227 | elif f in copied: # files we'll deal with on m2 side |
|
228 | 228 | pass |
|
229 | 229 | elif f in copy: |
|
230 | 230 | f2 = copy[f] |
|
231 | 231 | if f2 not in m2: # directory rename |
|
232 | 232 | act("remote renamed directory to " + f2, "d", |
|
233 | 233 | f, None, f2, m1.flags(f)) |
|
234 | 234 | else: # case 2 A,B/B/B or case 4,21 A/B/B |
|
235 | 235 | act("local copied/moved to " + f2, "m", |
|
236 | 236 | f, f2, f, fmerge(f, f2, f2), False) |
|
237 | 237 | elif f in ma: # clean, a different, no remote |
|
238 | 238 | if n != ma[f]: |
|
239 | 239 | if repo.ui.promptchoice( |
|
240 | 240 | _(" local changed %s which remote deleted\n" |
|
241 | 241 | "use (c)hanged version or (d)elete?") % f, |
|
242 | 242 | (_("&Changed"), _("&Delete")), 0): |
|
243 | 243 | act("prompt delete", "r", f) |
|
244 | 244 | else: |
|
245 | 245 | act("prompt keep", "a", f) |
|
246 | 246 | elif n[20:] == "a": # added, no remote |
|
247 | 247 | act("remote deleted", "f", f) |
|
248 | 248 | else: |
|
249 | 249 | act("other deleted", "r", f) |
|
250 | 250 | |
|
251 | 251 | for f, n in m2.iteritems(): |
|
252 | 252 | if partial and not partial(f): |
|
253 | 253 | continue |
|
254 | 254 | if f in m1 or f in copied: # files already visited |
|
255 | 255 | continue |
|
256 | 256 | if f in copy: |
|
257 | 257 | f2 = copy[f] |
|
258 | 258 | if f2 not in m1: # directory rename |
|
259 | 259 | act("local renamed directory to " + f2, "d", |
|
260 | 260 | None, f, f2, m2.flags(f)) |
|
261 | 261 | elif f2 in m2: # rename case 1, A/A,B/A |
|
262 | 262 | act("remote copied to " + f, "m", |
|
263 | 263 | f2, f, f, fmerge(f2, f, f2), False) |
|
264 | 264 | else: # case 3,20 A/B/A |
|
265 | 265 | act("remote moved to " + f, "m", |
|
266 | 266 | f2, f, f, fmerge(f2, f, f2), True) |
|
267 | 267 | elif f not in ma: |
|
268 | 268 | if (not overwrite |
|
269 | 269 | and _checkunknownfile(repo, p1, p2, f)): |
|
270 | 270 | rflags = fmerge(f, f, f) |
|
271 | 271 | act("remote differs from untracked local", |
|
272 | 272 | "m", f, f, f, rflags, False) |
|
273 | 273 | else: |
|
274 | 274 | act("remote created", "g", f, m2.flags(f)) |
|
275 | 275 | elif n != ma[f]: |
|
276 | 276 | if repo.ui.promptchoice( |
|
277 | 277 | _("remote changed %s which local deleted\n" |
|
278 | 278 | "use (c)hanged version or leave (d)eleted?") % f, |
|
279 | 279 | (_("&Changed"), _("&Deleted")), 0) == 0: |
|
280 | 280 | act("prompt recreating", "g", f, m2.flags(f)) |
|
281 | 281 | |
|
282 | 282 | return action |
|
283 | 283 | |
|
284 | 284 | def actionkey(a): |
|
285 | 285 | return a[1] == 'r' and -1 or 0, a |
|
286 | 286 | |
|
287 | 287 | def applyupdates(repo, action, wctx, mctx, actx, overwrite): |
|
288 | 288 | """apply the merge action list to the working directory |
|
289 | 289 | |
|
290 | 290 | wctx is the working copy context |
|
291 | 291 | mctx is the context to be merged into the working copy |
|
292 | 292 | actx is the context of the common ancestor |
|
293 | 293 | |
|
294 | 294 | Return a tuple of counts (updated, merged, removed, unresolved) that |
|
295 | 295 | describes how many files were affected by the update. |
|
296 | 296 | """ |
|
297 | 297 | |
|
298 | 298 | updated, merged, removed, unresolved = 0, 0, 0, 0 |
|
299 | 299 | ms = mergestate(repo) |
|
300 | 300 | ms.reset(wctx.p1().node()) |
|
301 | 301 | moves = [] |
|
302 | 302 | action.sort(key=actionkey) |
|
303 | 303 | |
|
304 | 304 | # prescan for merges |
|
305 | 305 | for a in action: |
|
306 | 306 | f, m = a[:2] |
|
307 | 307 | if m == 'm': # merge |
|
308 | 308 | f2, fd, flags, move = a[2:] |
|
309 | 309 | if f == '.hgsubstate': # merged internally |
|
310 | 310 | continue |
|
311 | 311 | repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd)) |
|
312 | 312 | fcl = wctx[f] |
|
313 | 313 | fco = mctx[f2] |
|
314 | 314 | if mctx == actx: # backwards, use working dir parent as ancestor |
|
315 | 315 | if fcl.parents(): |
|
316 | 316 | fca = fcl.p1() |
|
317 | 317 | else: |
|
318 | 318 | fca = repo.filectx(f, fileid=nullrev) |
|
319 | 319 | else: |
|
320 | 320 | fca = fcl.ancestor(fco, actx) |
|
321 | 321 | if not fca: |
|
322 | 322 | fca = repo.filectx(f, fileid=nullrev) |
|
323 | 323 | ms.add(fcl, fco, fca, fd, flags) |
|
324 | 324 | if f != fd and move: |
|
325 | 325 | moves.append(f) |
|
326 | 326 | |
|
327 | 327 | audit = scmutil.pathauditor(repo.root) |
|
328 | 328 | |
|
329 | 329 | # remove renamed files after safely stored |
|
330 | 330 | for f in moves: |
|
331 | 331 | if os.path.lexists(repo.wjoin(f)): |
|
332 | 332 | repo.ui.debug("removing %s\n" % f) |
|
333 | 333 | audit(f) |
|
334 | 334 | os.unlink(repo.wjoin(f)) |
|
335 | 335 | |
|
336 | 336 | numupdates = len(action) |
|
337 | 337 | for i, a in enumerate(action): |
|
338 | 338 | f, m = a[:2] |
|
339 | 339 | repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates, |
|
340 | 340 | unit=_('files')) |
|
341 | 341 | if f and f[0] == "/": |
|
342 | 342 | continue |
|
343 | 343 | if m == "r": # remove |
|
344 | 344 | repo.ui.note(_("removing %s\n") % f) |
|
345 | 345 | audit(f) |
|
346 | 346 | if f == '.hgsubstate': # subrepo states need updating |
|
347 | 347 | subrepo.submerge(repo, wctx, mctx, wctx, overwrite) |
|
348 | 348 | try: |
|
349 | 349 | util.unlinkpath(repo.wjoin(f)) |
|
350 | 350 | except OSError, inst: |
|
351 | 351 | if inst.errno != errno.ENOENT: |
|
352 | 352 | repo.ui.warn(_("update failed to remove %s: %s!\n") % |
|
353 | 353 | (f, inst.strerror)) |
|
354 | 354 | removed += 1 |
|
355 | 355 | elif m == "m": # merge |
|
356 | 356 | if f == '.hgsubstate': # subrepo states need updating |
|
357 | 357 | subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite) |
|
358 | 358 | continue |
|
359 | 359 | f2, fd, flags, move = a[2:] |
|
360 | 360 | repo.wopener.audit(fd) |
|
361 | 361 | r = ms.resolve(fd, wctx, mctx) |
|
362 | 362 | if r is not None and r > 0: |
|
363 | 363 | unresolved += 1 |
|
364 | 364 | else: |
|
365 | 365 | if r is None: |
|
366 | 366 | updated += 1 |
|
367 | 367 | else: |
|
368 | 368 | merged += 1 |
|
369 | 369 | if (move and repo.dirstate.normalize(fd) != f |
|
370 | 370 | and os.path.lexists(repo.wjoin(f))): |
|
371 | 371 | repo.ui.debug("removing %s\n" % f) |
|
372 | 372 | audit(f) |
|
373 | 373 | os.unlink(repo.wjoin(f)) |
|
374 | 374 | elif m == "g": # get |
|
375 | 375 | flags = a[2] |
|
376 | 376 | repo.ui.note(_("getting %s\n") % f) |
|
377 | 377 | t = mctx.filectx(f).data() |
|
378 | 378 | repo.wwrite(f, t, flags) |
|
379 | 379 | t = None |
|
380 | 380 | updated += 1 |
|
381 | 381 | if f == '.hgsubstate': # subrepo states need updating |
|
382 | 382 | subrepo.submerge(repo, wctx, mctx, wctx, overwrite) |
|
383 | 383 | elif m == "d": # directory rename |
|
384 | 384 | f2, fd, flags = a[2:] |
|
385 | 385 | if f: |
|
386 | 386 | repo.ui.note(_("moving %s to %s\n") % (f, fd)) |
|
387 | 387 | audit(f) |
|
388 | 388 | t = wctx.filectx(f).data() |
|
389 | 389 | repo.wwrite(fd, t, flags) |
|
390 | 390 | util.unlinkpath(repo.wjoin(f)) |
|
391 | 391 | if f2: |
|
392 | 392 | repo.ui.note(_("getting %s to %s\n") % (f2, fd)) |
|
393 | 393 | t = mctx.filectx(f2).data() |
|
394 | 394 | repo.wwrite(fd, t, flags) |
|
395 | 395 | updated += 1 |
|
396 | 396 | elif m == "dr": # divergent renames |
|
397 | 397 | fl = a[2] |
|
398 | 398 | repo.ui.warn(_("note: possible conflict - %s was renamed " |
|
399 | 399 | "multiple times to:\n") % f) |
|
400 | 400 | for nf in fl: |
|
401 | 401 | repo.ui.warn(" %s\n" % nf) |
|
402 | 402 | elif m == "e": # exec |
|
403 | 403 | flags = a[2] |
|
404 | 404 | repo.wopener.audit(f) |
|
405 | 405 | util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags) |
|
406 | 406 | ms.commit() |
|
407 | 407 | repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files')) |
|
408 | 408 | |
|
409 | 409 | return updated, merged, removed, unresolved |
|
410 | 410 | |
|
411 | 411 | def recordupdates(repo, action, branchmerge): |
|
412 | 412 | "record merge actions to the dirstate" |
|
413 | 413 | |
|
414 | 414 | for a in action: |
|
415 | 415 | f, m = a[:2] |
|
416 | 416 | if m == "r": # remove |
|
417 | 417 | if branchmerge: |
|
418 | 418 | repo.dirstate.remove(f) |
|
419 | 419 | else: |
|
420 | 420 | repo.dirstate.drop(f) |
|
421 | 421 | elif m == "a": # re-add |
|
422 | 422 | if not branchmerge: |
|
423 | 423 | repo.dirstate.add(f) |
|
424 | 424 | elif m == "f": # forget |
|
425 | 425 | repo.dirstate.drop(f) |
|
426 | 426 | elif m == "e": # exec change |
|
427 | 427 | repo.dirstate.normallookup(f) |
|
428 | 428 | elif m == "g": # get |
|
429 | 429 | if branchmerge: |
|
430 | 430 | repo.dirstate.otherparent(f) |
|
431 | 431 | else: |
|
432 | 432 | repo.dirstate.normal(f) |
|
433 | 433 | elif m == "m": # merge |
|
434 | 434 | f2, fd, flag, move = a[2:] |
|
435 | 435 | if branchmerge: |
|
436 | 436 | # We've done a branch merge, mark this file as merged |
|
437 | 437 | # so that we properly record the merger later |
|
438 | 438 | repo.dirstate.merge(fd) |
|
439 | 439 | if f != f2: # copy/rename |
|
440 | 440 | if move: |
|
441 | 441 | repo.dirstate.remove(f) |
|
442 | 442 | if f != fd: |
|
443 | 443 | repo.dirstate.copy(f, fd) |
|
444 | 444 | else: |
|
445 | 445 | repo.dirstate.copy(f2, fd) |
|
446 | 446 | else: |
|
447 | 447 | # We've update-merged a locally modified file, so |
|
448 | 448 | # we set the dirstate to emulate a normal checkout |
|
449 | 449 | # of that file some time in the past. Thus our |
|
450 | 450 | # merge will appear as a normal local file |
|
451 | 451 | # modification. |
|
452 | 452 | if f2 == fd: # file not locally copied/moved |
|
453 | 453 | repo.dirstate.normallookup(fd) |
|
454 | 454 | if move: |
|
455 | 455 | repo.dirstate.drop(f) |
|
456 | 456 | elif m == "d": # directory rename |
|
457 | 457 | f2, fd, flag = a[2:] |
|
458 | 458 | if not f2 and f not in repo.dirstate: |
|
459 | 459 | # untracked file moved |
|
460 | 460 | continue |
|
461 | 461 | if branchmerge: |
|
462 | 462 | repo.dirstate.add(fd) |
|
463 | 463 | if f: |
|
464 | 464 | repo.dirstate.remove(f) |
|
465 | 465 | repo.dirstate.copy(f, fd) |
|
466 | 466 | if f2: |
|
467 | 467 | repo.dirstate.copy(f2, fd) |
|
468 | 468 | else: |
|
469 | 469 | repo.dirstate.normal(fd) |
|
470 | 470 | if f: |
|
471 | 471 | repo.dirstate.drop(f) |
|
472 | 472 | |
|
473 | 473 | def update(repo, node, branchmerge, force, partial, ancestor=None): |
|
474 | 474 | """ |
|
475 | 475 | Perform a merge between the working directory and the given node |
|
476 | 476 | |
|
477 | 477 | node = the node to update to, or None if unspecified |
|
478 | 478 | branchmerge = whether to merge between branches |
|
479 | 479 | force = whether to force branch merging or file overwriting |
|
480 | 480 | partial = a function to filter file lists (dirstate not updated) |
|
481 | 481 | |
|
482 | 482 | The table below shows all the behaviors of the update command |
|
483 | 483 | given the -c and -C or no options, whether the working directory |
|
484 | 484 | is dirty, whether a revision is specified, and the relationship of |
|
485 | 485 | the parent rev to the target rev (linear, on the same named |
|
486 | 486 | branch, or on another named branch). |
|
487 | 487 | |
|
488 | 488 | This logic is tested by test-update-branches.t. |
|
489 | 489 | |
|
490 | 490 | -c -C dirty rev | linear same cross |
|
491 | 491 | n n n n | ok (1) x |
|
492 | 492 | n n n y | ok ok ok |
|
493 | 493 | n n y * | merge (2) (2) |
|
494 | 494 | n y * * | --- discard --- |
|
495 | 495 | y n y * | --- (3) --- |
|
496 | 496 | y n n * | --- ok --- |
|
497 | 497 | y y * * | --- (4) --- |
|
498 | 498 | |
|
499 | 499 | x = can't happen |
|
500 | 500 | * = don't-care |
|
501 | 501 | 1 = abort: crosses branches (use 'hg merge' or 'hg update -c') |
|
502 | 502 | 2 = abort: crosses branches (use 'hg merge' to merge or |
|
503 | 503 | use 'hg update -C' to discard changes) |
|
504 | 504 | 3 = abort: uncommitted local changes |
|
505 | 505 | 4 = incompatible options (checked in commands.py) |
|
506 | 506 | |
|
507 | 507 | Return the same tuple as applyupdates(). |
|
508 | 508 | """ |
|
509 | 509 | |
|
510 | 510 | onode = node |
|
511 | 511 | wlock = repo.wlock() |
|
512 | 512 | try: |
|
513 | 513 | wc = repo[None] |
|
514 | 514 | if node is None: |
|
515 | 515 | # tip of current branch |
|
516 | 516 | try: |
|
517 | 517 | node = repo.branchtags()[wc.branch()] |
|
518 | 518 | except KeyError: |
|
519 | 519 | if wc.branch() == "default": # no default branch! |
|
520 | 520 | node = repo.lookup("tip") # update to tip |
|
521 | 521 | else: |
|
522 | 522 | raise util.Abort(_("branch %s not found") % wc.branch()) |
|
523 | 523 | overwrite = force and not branchmerge |
|
524 | 524 | pl = wc.parents() |
|
525 | 525 | p1, p2 = pl[0], repo[node] |
|
526 | 526 | if ancestor: |
|
527 | 527 | pa = repo[ancestor] |
|
528 | 528 | else: |
|
529 | 529 | pa = p1.ancestor(p2) |
|
530 | 530 | |
|
531 | 531 | fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2) |
|
532 | 532 | |
|
533 | 533 | ### check phase |
|
534 | 534 | if not overwrite and len(pl) > 1: |
|
535 | 535 | raise util.Abort(_("outstanding uncommitted merges")) |
|
536 | 536 | if branchmerge: |
|
537 | 537 | if pa == p2: |
|
538 | 538 | raise util.Abort(_("merging with a working directory ancestor" |
|
539 | 539 | " has no effect")) |
|
540 | 540 | elif pa == p1: |
|
541 | 541 | if p1.branch() == p2.branch(): |
|
542 | 542 | raise util.Abort(_("nothing to merge"), |
|
543 | 543 | hint=_("use 'hg update' " |
|
544 | 544 | "or check 'hg heads'")) |
|
545 | 545 | if not force and (wc.files() or wc.deleted()): |
|
546 | 546 | raise util.Abort(_("outstanding uncommitted changes"), |
|
547 | 547 | hint=_("use 'hg status' to list changes")) |
|
548 | 548 | if not force: |
|
549 | 549 | _checkunknown(repo, wc, p2) |
|
550 | 550 | for s in wc.substate: |
|
551 | 551 | if wc.sub(s).dirty(): |
|
552 | 552 | raise util.Abort(_("outstanding uncommitted changes in " |
|
553 | 553 | "subrepository '%s'") % s) |
|
554 | 554 | |
|
555 | 555 | elif not overwrite: |
|
556 | 556 | if pa == p1 or pa == p2: # linear |
|
557 | 557 | pass # all good |
|
558 | 558 | elif wc.dirty(missing=True): |
|
559 | 559 | raise util.Abort(_("crosses branches (merge branches or use" |
|
560 | 560 | " --clean to discard changes)")) |
|
561 | 561 | elif onode is None: |
|
562 | 562 | raise util.Abort(_("crosses branches (merge branches or update" |
|
563 | 563 | " --check to force update)")) |
|
564 | 564 | else: |
|
565 | 565 | # Allow jumping branches if clean and specific rev given |
|
566 | 566 | pa = p1 |
|
567 | 567 | |
|
568 | 568 | ### calculate phase |
|
569 | 569 | action = [] |
|
570 | 570 | folding = not util.checkcase(repo.path) |
|
571 | 571 | if folding: |
|
572 | 572 | _checkcollision(p2, branchmerge and p1) |
|
573 | 573 | action += _forgetremoved(wc, p2, branchmerge) |
|
574 | 574 | action += manifestmerge(repo, wc, p2, pa, overwrite, partial) |
|
575 | 575 | |
|
576 | 576 | ### apply phase |
|
577 | 577 | if not branchmerge: # just jump to the new rev |
|
578 | 578 | fp1, fp2, xp1, xp2 = fp2, nullid, xp2, '' |
|
579 | 579 | if not partial: |
|
580 | 580 | repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2) |
|
581 | 581 | |
|
582 | 582 | stats = applyupdates(repo, action, wc, p2, pa, overwrite) |
|
583 | 583 | |
|
584 | 584 | if not partial: |
|
585 | 585 | repo.dirstate.setparents(fp1, fp2) |
|
586 | 586 | recordupdates(repo, action, branchmerge) |
|
587 | 587 | if not branchmerge: |
|
588 | 588 | repo.dirstate.setbranch(p2.branch()) |
|
589 | 589 | finally: |
|
590 | 590 | wlock.release() |
|
591 | 591 | |
|
592 | 592 | if not partial: |
|
593 | 593 | repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3]) |
|
594 | 594 | return stats |
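
The merge.py hunk above only touches the symlink special case inside fmerge(), so as context, here is a rough standalone sketch of the surrounding three-way flag merge. It is illustrative only: plain flag strings such as '', 'x' and 'l' stand in for manifest flags, the interactive prompt is skipped, and the symlink content comparison that the changed line performs is left out.

def merge_flags(local, other, ancestor):
    # Simplified model of fmerge(f, f2, fa) above; m/n/a follow its naming.
    m, n, a = local, other, ancestor
    if m == n:                # both sides agree
        return m
    if m and n and not a:     # both set a flag the ancestor lacks;
        return m              # the real code prompts the user here
    if m and m != a:          # only the local side changed the flag
        return m
    if n and n != a:          # only the other side changed the flag
        return n              # (fmerge defers symlink conflicts to filemerge)
    return ''                 # flag was cleared relative to the ancestor

With that model, merge_flags('x', '', '') keeps a locally added exec bit, while merge_flags('', 'x', 'x') reports the flag as cleared because only the local side dropped it.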