@@ -0,0 +1,119 @@
# reproduce issue2264, issue2516

create test repo
  $ cat <<EOF >> $HGRCPATH
  > [extensions]
  > transplant =
  > graphlog =
  > EOF
  $ hg init repo
  $ cd repo
  $ template="{rev} {desc|firstline} [{branch}]\n"

# we need to start out with two changesets on the default branch
# in order to avoid the cute little optimization where transplant
# pulls rather than transplants
add initial changesets
  $ echo feature1 > file1
  $ hg ci -Am"feature 1"
  adding file1
  $ echo feature2 >> file2
  $ hg ci -Am"feature 2"
  adding file2

# The changes to 'bugfix' are enough to show the bug: in fact, with only
# those changes, it's a very noisy crash ("RuntimeError: nothing
# committed after transplant"). But if we modify a second file in the
# transplanted changesets, the bug is much more subtle: transplant
# silently drops the second change to 'bugfix' on the floor, and we only
# see it when we run 'hg status' after transplanting. Subtle data loss
# bugs are worse than crashes, so reproduce the subtle case here.
commit bug fixes on bug fix branch
  $ hg branch fixes
  marked working directory as branch fixes
  $ echo fix1 > bugfix
  $ echo fix1 >> file1
  $ hg ci -Am"fix 1"
  adding bugfix
  $ echo fix2 > bugfix
  $ echo fix2 >> file1
  $ hg ci -Am"fix 2"
  $ hg glog --template="$template"
  @ 3 fix 2 [fixes]
  |
  o 2 fix 1 [fixes]
  |
  o 1 feature 2 [default]
  |
  o 0 feature 1 [default]

transplant bug fixes onto release branch
  $ hg update 0
  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ hg branch release
  marked working directory as branch release
  $ hg transplant 2 3
  applying [0-9a-f]{12} (re)
  [0-9a-f]{12} transplanted to [0-9a-f]{12} (re)
  applying [0-9a-f]{12} (re)
  [0-9a-f]{12} transplanted to [0-9a-f]{12} (re)
  $ hg glog --template="$template"
  @ 5 fix 2 [release]
  |
  o 4 fix 1 [release]
  |
  | o 3 fix 2 [fixes]
  | |
  | o 2 fix 1 [fixes]
  | |
  | o 1 feature 2 [default]
  |/
  o 0 feature 1 [default]

  $ hg status
  $ hg status --rev 0:4
  M file1
  A bugfix
  $ hg status --rev 4:5
  M bugfix
  M file1

now test that we fixed the bug for all scripts/extensions
  $ cat > $TESTTMP/committwice.py <<__EOF__
  > from mercurial import ui, hg, match, node
  >
  > def replacebyte(fn, b):
  >     f = open(fn, "rb+")
  >     f.seek(0, 0)
  >     f.write(b)
  >     f.close()
  >
  > repo = hg.repository(ui.ui(), '.')
  > assert len(repo) == 6, \
  >     "initial: len(repo) == %d, expected 6" % len(repo)
  > try:
  >     wlock = repo.wlock()
  >     lock = repo.lock()
  >     m = match.exact(repo.root, '', ['file1'])
  >     replacebyte("file1", "x")
  >     n = repo.commit(text="x", user="test", date=(0, 0), match=m)
  >     print "commit 1: len(repo) == %d" % len(repo)
  >     replacebyte("file1", "y")
  >     n = repo.commit(text="y", user="test", date=(0, 0), match=m)
  >     print "commit 2: len(repo) == %d" % len(repo)
  > finally:
  >     lock.release()
  >     wlock.release()
  > __EOF__
  $ $PYTHON $TESTTMP/committwice.py
  commit 1: len(repo) == 7
  commit 2: len(repo) == 8

Do a size-preserving modification outside of that process
  $ echo abcd > bugfix
  $ hg status
  M bugfix
  $ hg log --template "{rev} {desc} {files}\n" -r5:
  5 fix 2 bugfix file1
  6 x file1
  7 y file1
mercurial/dirstate.py
@@ -1,685 +1,711 @@
|
1 | 1 | # dirstate.py - working directory tracking for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from node import nullid |
|
9 | 9 | from i18n import _ |
|
10 | 10 | import util, ignore, osutil, parsers, encoding |
|
11 | 11 | import struct, os, stat, errno |
|
12 | 12 | import cStringIO |
|
13 | 13 | |
|
14 | 14 | _format = ">cllll" |
|
15 | 15 | propertycache = util.propertycache |
|
16 | 16 | |
|
17 | 17 | def _finddirs(path): |
|
18 | 18 | pos = path.rfind('/') |
|
19 | 19 | while pos != -1: |
|
20 | 20 | yield path[:pos] |
|
21 | 21 | pos = path.rfind('/', 0, pos) |
|
22 | 22 | |
|
23 | 23 | def _incdirs(dirs, path): |
|
24 | 24 | for base in _finddirs(path): |
|
25 | 25 | if base in dirs: |
|
26 | 26 | dirs[base] += 1 |
|
27 | 27 | return |
|
28 | 28 | dirs[base] = 1 |
|
29 | 29 | |
|
30 | 30 | def _decdirs(dirs, path): |
|
31 | 31 | for base in _finddirs(path): |
|
32 | 32 | if dirs[base] > 1: |
|
33 | 33 | dirs[base] -= 1 |
|
34 | 34 | return |
|
35 | 35 | del dirs[base] |
|
36 | 36 | |
|
37 | 37 | class dirstate(object): |
|
38 | 38 | |
|
39 | 39 | def __init__(self, opener, ui, root, validate): |
|
40 | 40 | '''Create a new dirstate object. |
|
41 | 41 | |
|
42 | 42 | opener is an open()-like callable that can be used to open the |
|
43 | 43 | dirstate file; root is the root of the directory tracked by |
|
44 | 44 | the dirstate. |
|
45 | 45 | ''' |
|
46 | 46 | self._opener = opener |
|
47 | 47 | self._validate = validate |
|
48 | 48 | self._root = root |
|
49 | 49 | self._rootdir = os.path.join(root, '') |
|
50 | 50 | self._dirty = False |
|
51 | 51 | self._dirtypl = False |
|
52 | self._lastnormal = set() # files believed to be normal | |
|
52 | 53 | self._ui = ui |
|
53 | 54 | |
|
54 | 55 | @propertycache |
|
55 | 56 | def _map(self): |
|
56 | 57 | '''Return the dirstate contents as a map from filename to |
|
57 | 58 | (state, mode, size, time).''' |
|
58 | 59 | self._read() |
|
59 | 60 | return self._map |
|
60 | 61 | |
|
61 | 62 | @propertycache |
|
62 | 63 | def _copymap(self): |
|
63 | 64 | self._read() |
|
64 | 65 | return self._copymap |
|
65 | 66 | |
|
66 | 67 | @propertycache |
|
67 | 68 | def _foldmap(self): |
|
68 | 69 | f = {} |
|
69 | 70 | for name in self._map: |
|
70 | 71 | f[os.path.normcase(name)] = name |
|
71 | 72 | return f |
|
72 | 73 | |
|
73 | 74 | @propertycache |
|
74 | 75 | def _branch(self): |
|
75 | 76 | try: |
|
76 | 77 | return self._opener("branch").read().strip() or "default" |
|
77 | 78 | except IOError: |
|
78 | 79 | return "default" |
|
79 | 80 | |
|
80 | 81 | @propertycache |
|
81 | 82 | def _pl(self): |
|
82 | 83 | try: |
|
83 | 84 | fp = self._opener("dirstate") |
|
84 | 85 | st = fp.read(40) |
|
85 | 86 | fp.close() |
|
86 | 87 | l = len(st) |
|
87 | 88 | if l == 40: |
|
88 | 89 | return st[:20], st[20:40] |
|
89 | 90 | elif l > 0 and l < 40: |
|
90 | 91 | raise util.Abort(_('working directory state appears damaged!')) |
|
91 | 92 | except IOError, err: |
|
92 | 93 | if err.errno != errno.ENOENT: |
|
93 | 94 | raise |
|
94 | 95 | return [nullid, nullid] |
|
95 | 96 | |
|
96 | 97 | @propertycache |
|
97 | 98 | def _dirs(self): |
|
98 | 99 | dirs = {} |
|
99 | 100 | for f, s in self._map.iteritems(): |
|
100 | 101 | if s[0] != 'r': |
|
101 | 102 | _incdirs(dirs, f) |
|
102 | 103 | return dirs |
|
103 | 104 | |
|
104 | 105 | @propertycache |
|
105 | 106 | def _ignore(self): |
|
106 | 107 | files = [self._join('.hgignore')] |
|
107 | 108 | for name, path in self._ui.configitems("ui"): |
|
108 | 109 | if name == 'ignore' or name.startswith('ignore.'): |
|
109 | 110 | files.append(util.expandpath(path)) |
|
110 | 111 | return ignore.ignore(self._root, files, self._ui.warn) |
|
111 | 112 | |
|
112 | 113 | @propertycache |
|
113 | 114 | def _slash(self): |
|
114 | 115 | return self._ui.configbool('ui', 'slash') and os.sep != '/' |
|
115 | 116 | |
|
116 | 117 | @propertycache |
|
117 | 118 | def _checklink(self): |
|
118 | 119 | return util.checklink(self._root) |
|
119 | 120 | |
|
120 | 121 | @propertycache |
|
121 | 122 | def _checkexec(self): |
|
122 | 123 | return util.checkexec(self._root) |
|
123 | 124 | |
|
124 | 125 | @propertycache |
|
125 | 126 | def _checkcase(self): |
|
126 | 127 | return not util.checkcase(self._join('.hg')) |
|
127 | 128 | |
|
128 | 129 | def _join(self, f): |
|
129 | 130 | # much faster than os.path.join() |
|
130 | 131 | # it's safe because f is always a relative path |
|
131 | 132 | return self._rootdir + f |
|
132 | 133 | |
|
133 | 134 | def flagfunc(self, fallback): |
|
134 | 135 | if self._checklink: |
|
135 | 136 | if self._checkexec: |
|
136 | 137 | def f(x): |
|
137 | 138 | p = self._join(x) |
|
138 | 139 | if os.path.islink(p): |
|
139 | 140 | return 'l' |
|
140 | 141 | if util.is_exec(p): |
|
141 | 142 | return 'x' |
|
142 | 143 | return '' |
|
143 | 144 | return f |
|
144 | 145 | def f(x): |
|
145 | 146 | if os.path.islink(self._join(x)): |
|
146 | 147 | return 'l' |
|
147 | 148 | if 'x' in fallback(x): |
|
148 | 149 | return 'x' |
|
149 | 150 | return '' |
|
150 | 151 | return f |
|
151 | 152 | if self._checkexec: |
|
152 | 153 | def f(x): |
|
153 | 154 | if 'l' in fallback(x): |
|
154 | 155 | return 'l' |
|
155 | 156 | if util.is_exec(self._join(x)): |
|
156 | 157 | return 'x' |
|
157 | 158 | return '' |
|
158 | 159 | return f |
|
159 | 160 | return fallback |
|
160 | 161 | |
|
161 | 162 | def getcwd(self): |
|
162 | 163 | cwd = os.getcwd() |
|
163 | 164 | if cwd == self._root: |
|
164 | 165 | return '' |
|
165 | 166 | # self._root ends with a path separator if self._root is '/' or 'C:\' |
|
166 | 167 | rootsep = self._root |
|
167 | 168 | if not util.endswithsep(rootsep): |
|
168 | 169 | rootsep += os.sep |
|
169 | 170 | if cwd.startswith(rootsep): |
|
170 | 171 | return cwd[len(rootsep):] |
|
171 | 172 | else: |
|
172 | 173 | # we're outside the repo. return an absolute path. |
|
173 | 174 | return cwd |
|
174 | 175 | |
|
175 | 176 | def pathto(self, f, cwd=None): |
|
176 | 177 | if cwd is None: |
|
177 | 178 | cwd = self.getcwd() |
|
178 | 179 | path = util.pathto(self._root, cwd, f) |
|
179 | 180 | if self._slash: |
|
180 | 181 | return util.normpath(path) |
|
181 | 182 | return path |
|
182 | 183 | |
|
183 | 184 | def __getitem__(self, key): |
|
184 | 185 | '''Return the current state of key (a filename) in the dirstate. |
|
185 | 186 | |
|
186 | 187 | States are: |
|
187 | 188 | n normal |
|
188 | 189 | m needs merging |
|
189 | 190 | r marked for removal |
|
190 | 191 | a marked for addition |
|
191 | 192 | ? not tracked |
|
192 | 193 | ''' |
|
193 | 194 | return self._map.get(key, ("?",))[0] |
|
194 | 195 | |
|
195 | 196 | def __contains__(self, key): |
|
196 | 197 | return key in self._map |
|
197 | 198 | |
|
198 | 199 | def __iter__(self): |
|
199 | 200 | for x in sorted(self._map): |
|
200 | 201 | yield x |
|
201 | 202 | |
|
202 | 203 | def parents(self): |
|
203 | 204 | return [self._validate(p) for p in self._pl] |
|
204 | 205 | |
|
205 | 206 | def branch(self): |
|
206 | 207 | return encoding.tolocal(self._branch) |
|
207 | 208 | |
|
208 | 209 | def setparents(self, p1, p2=nullid): |
|
209 | 210 | self._dirty = self._dirtypl = True |
|
210 | 211 | self._pl = p1, p2 |
|
211 | 212 | |
|
212 | 213 | def setbranch(self, branch): |
|
213 | 214 | if branch in ['tip', '.', 'null']: |
|
214 | 215 | raise util.Abort(_('the name \'%s\' is reserved') % branch) |
|
215 | 216 | self._branch = encoding.fromlocal(branch) |
|
216 | 217 | self._opener("branch", "w").write(self._branch + '\n') |
|
217 | 218 | |
|
218 | 219 | def _read(self): |
|
219 | 220 | self._map = {} |
|
220 | 221 | self._copymap = {} |
|
221 | 222 | try: |
|
222 | 223 | st = self._opener("dirstate").read() |
|
223 | 224 | except IOError, err: |
|
224 | 225 | if err.errno != errno.ENOENT: |
|
225 | 226 | raise |
|
226 | 227 | return |
|
227 | 228 | if not st: |
|
228 | 229 | return |
|
229 | 230 | |
|
230 | 231 | p = parsers.parse_dirstate(self._map, self._copymap, st) |
|
231 | 232 | if not self._dirtypl: |
|
232 | 233 | self._pl = p |
|
233 | 234 | |
|
234 | 235 | def invalidate(self): |
|
235 | 236 | for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs", |
|
236 | 237 | "_ignore"): |
|
237 | 238 | if a in self.__dict__: |
|
238 | 239 | delattr(self, a) |
|
239 | 240 | self._dirty = False |
|
240 | 241 | |
|
241 | 242 | def copy(self, source, dest): |
|
242 | 243 | """Mark dest as a copy of source. Unmark dest if source is None.""" |
|
243 | 244 | if source == dest: |
|
244 | 245 | return |
|
245 | 246 | self._dirty = True |
|
246 | 247 | if source is not None: |
|
247 | 248 | self._copymap[dest] = source |
|
248 | 249 | elif dest in self._copymap: |
|
249 | 250 | del self._copymap[dest] |
|
250 | 251 | |
|
251 | 252 | def copied(self, file): |
|
252 | 253 | return self._copymap.get(file, None) |
|
253 | 254 | |
|
254 | 255 | def copies(self): |
|
255 | 256 | return self._copymap |
|
256 | 257 | |
|
257 | 258 | def _droppath(self, f): |
|
258 | 259 | if self[f] not in "?r" and "_dirs" in self.__dict__: |
|
259 | 260 | _decdirs(self._dirs, f) |
|
260 | 261 | |
|
261 | 262 | def _addpath(self, f, check=False): |
|
262 | 263 | oldstate = self[f] |
|
263 | 264 | if check or oldstate == "r": |
|
264 | 265 | if '\r' in f or '\n' in f: |
|
265 | 266 | raise util.Abort( |
|
266 | 267 | _("'\\n' and '\\r' disallowed in filenames: %r") % f) |
|
267 | 268 | if f in self._dirs: |
|
268 | 269 | raise util.Abort(_('directory %r already in dirstate') % f) |
|
269 | 270 | # shadows |
|
270 | 271 | for d in _finddirs(f): |
|
271 | 272 | if d in self._dirs: |
|
272 | 273 | break |
|
273 | 274 | if d in self._map and self[d] != 'r': |
|
274 | 275 | raise util.Abort( |
|
275 | 276 | _('file %r in dirstate clashes with %r') % (d, f)) |
|
276 | 277 | if oldstate in "?r" and "_dirs" in self.__dict__: |
|
277 | 278 | _incdirs(self._dirs, f) |
|
278 | 279 | |
|
279 | 280 | def normal(self, f): |
|
280 | 281 | '''Mark a file normal and clean.''' |
|
281 | 282 | self._dirty = True |
|
282 | 283 | self._addpath(f) |
|
283 | 284 | s = os.lstat(self._join(f)) |
|
284 | 285 | self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime)) |
|
285 | 286 | if f in self._copymap: |
|
286 | 287 | del self._copymap[f] |
|
287 | 288 | |
|
289 | # Right now, this file is clean: but if some code in this | |
|
290 | # process modifies it without changing its size before the clock | |
|
291 | # ticks over to the next second, then it won't be clean anymore. | |
|
292 | # So make sure that status() will look harder at it. | |
|
293 | self._lastnormal.add(f) | |
|
294 | ||
|
288 | 295 | def normallookup(self, f): |
|
289 | 296 | '''Mark a file normal, but possibly dirty.''' |
|
290 | 297 | if self._pl[1] != nullid and f in self._map: |
|
291 | 298 | # if there is a merge going on and the file was either |
|
292 | 299 | # in state 'm' (-1) or coming from other parent (-2) before |
|
293 | 300 | # being removed, restore that state. |
|
294 | 301 | entry = self._map[f] |
|
295 | 302 | if entry[0] == 'r' and entry[2] in (-1, -2): |
|
296 | 303 | source = self._copymap.get(f) |
|
297 | 304 | if entry[2] == -1: |
|
298 | 305 | self.merge(f) |
|
299 | 306 | elif entry[2] == -2: |
|
300 | 307 | self.otherparent(f) |
|
301 | 308 | if source: |
|
302 | 309 | self.copy(source, f) |
|
303 | 310 | return |
|
304 | 311 | if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2: |
|
305 | 312 | return |
|
306 | 313 | self._dirty = True |
|
307 | 314 | self._addpath(f) |
|
308 | 315 | self._map[f] = ('n', 0, -1, -1) |
|
309 | 316 | if f in self._copymap: |
|
310 | 317 | del self._copymap[f] |
|
318 | self._lastnormal.discard(f) | |
|
311 | 319 | |
|
312 | 320 | def otherparent(self, f): |
|
313 | 321 | '''Mark as coming from the other parent, always dirty.''' |
|
314 | 322 | if self._pl[1] == nullid: |
|
315 | 323 | raise util.Abort(_("setting %r to other parent " |
|
316 | 324 | "only allowed in merges") % f) |
|
317 | 325 | self._dirty = True |
|
318 | 326 | self._addpath(f) |
|
319 | 327 | self._map[f] = ('n', 0, -2, -1) |
|
320 | 328 | if f in self._copymap: |
|
321 | 329 | del self._copymap[f] |
|
330 | self._lastnormal.discard(f) | |
|
322 | 331 | |
|
323 | 332 | def add(self, f): |
|
324 | 333 | '''Mark a file added.''' |
|
325 | 334 | self._dirty = True |
|
326 | 335 | self._addpath(f, True) |
|
327 | 336 | self._map[f] = ('a', 0, -1, -1) |
|
328 | 337 | if f in self._copymap: |
|
329 | 338 | del self._copymap[f] |
|
339 | self._lastnormal.discard(f) | |
|
330 | 340 | |
|
331 | 341 | def remove(self, f): |
|
332 | 342 | '''Mark a file removed.''' |
|
333 | 343 | self._dirty = True |
|
334 | 344 | self._droppath(f) |
|
335 | 345 | size = 0 |
|
336 | 346 | if self._pl[1] != nullid and f in self._map: |
|
337 | 347 | # backup the previous state |
|
338 | 348 | entry = self._map[f] |
|
339 | 349 | if entry[0] == 'm': # merge |
|
340 | 350 | size = -1 |
|
341 | 351 | elif entry[0] == 'n' and entry[2] == -2: # other parent |
|
342 | 352 | size = -2 |
|
343 | 353 | self._map[f] = ('r', 0, size, 0) |
|
344 | 354 | if size == 0 and f in self._copymap: |
|
345 | 355 | del self._copymap[f] |
|
356 | self._lastnormal.discard(f) | |
|
346 | 357 | |
|
347 | 358 | def merge(self, f): |
|
348 | 359 | '''Mark a file merged.''' |
|
349 | 360 | self._dirty = True |
|
350 | 361 | s = os.lstat(self._join(f)) |
|
351 | 362 | self._addpath(f) |
|
352 | 363 | self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime)) |
|
353 | 364 | if f in self._copymap: |
|
354 | 365 | del self._copymap[f] |
|
366 | self._lastnormal.discard(f) | |
|
355 | 367 | |
|
356 | 368 | def forget(self, f): |
|
357 | 369 | '''Forget a file.''' |
|
358 | 370 | self._dirty = True |
|
359 | 371 | try: |
|
360 | 372 | self._droppath(f) |
|
361 | 373 | del self._map[f] |
|
362 | 374 | except KeyError: |
|
363 | 375 | self._ui.warn(_("not in dirstate: %s\n") % f) |
|
376 | self._lastnormal.discard(f) | |
|
364 | 377 | |
|
365 | 378 | def _normalize(self, path, knownpath): |
|
366 | 379 | norm_path = os.path.normcase(path) |
|
367 | 380 | fold_path = self._foldmap.get(norm_path, None) |
|
368 | 381 | if fold_path is None: |
|
369 | 382 | if knownpath or not os.path.lexists(os.path.join(self._root, path)): |
|
370 | 383 | fold_path = path |
|
371 | 384 | else: |
|
372 | 385 | fold_path = self._foldmap.setdefault(norm_path, |
|
373 | 386 | util.fspath(path, self._root)) |
|
374 | 387 | return fold_path |
|
375 | 388 | |
|
376 | 389 | def clear(self): |
|
377 | 390 | self._map = {} |
|
378 | 391 | if "_dirs" in self.__dict__: |
|
379 | 392 | delattr(self, "_dirs") |
|
380 | 393 | self._copymap = {} |
|
381 | 394 | self._pl = [nullid, nullid] |
|
382 | 395 | self._dirty = True |
|
383 | 396 | |
|
384 | 397 | def rebuild(self, parent, files): |
|
385 | 398 | self.clear() |
|
386 | 399 | for f in files: |
|
387 | 400 | if 'x' in files.flags(f): |
|
388 | 401 | self._map[f] = ('n', 0777, -1, 0) |
|
389 | 402 | else: |
|
390 | 403 | self._map[f] = ('n', 0666, -1, 0) |
|
391 | 404 | self._pl = (parent, nullid) |
|
392 | 405 | self._dirty = True |
|
393 | 406 | |
|
394 | 407 | def write(self): |
|
395 | 408 | if not self._dirty: |
|
396 | 409 | return |
|
397 | 410 | st = self._opener("dirstate", "w", atomictemp=True) |
|
398 | 411 | |
|
399 | 412 | # use the modification time of the newly created temporary file as the |
|
400 | 413 | # filesystem's notion of 'now' |
|
401 | 414 | now = int(util.fstat(st).st_mtime) |
|
402 | 415 | |
|
403 | 416 | cs = cStringIO.StringIO() |
|
404 | 417 | copymap = self._copymap |
|
405 | 418 | pack = struct.pack |
|
406 | 419 | write = cs.write |
|
407 | 420 | write("".join(self._pl)) |
|
408 | 421 | for f, e in self._map.iteritems(): |
|
409 | 422 | if e[0] == 'n' and e[3] == now: |
|
410 | 423 | # The file was last modified "simultaneously" with the current |
|
411 | 424 | # write to dirstate (i.e. within the same second for file- |
|
412 | 425 | # systems with a granularity of 1 sec). This commonly happens |
|
413 | 426 | # for at least a couple of files on 'update'. |
|
414 | 427 | # The user could change the file without changing its size |
|
415 | 428 | # within the same second. Invalidate the file's stat data in |
|
416 | 429 | # dirstate, forcing future 'status' calls to compare the |
|
417 | 430 | # contents of the file. This prevents mistakenly treating such |
|
418 | 431 | # files as clean. |
|
419 | 432 | e = (e[0], 0, -1, -1) # mark entry as 'unset' |
|
420 | 433 | self._map[f] = e |
|
421 | 434 | |
|
422 | 435 | if f in copymap: |
|
423 | 436 | f = "%s\0%s" % (f, copymap[f]) |
|
424 | 437 | e = pack(_format, e[0], e[1], e[2], e[3], len(f)) |
|
425 | 438 | write(e) |
|
426 | 439 | write(f) |
|
427 | 440 | st.write(cs.getvalue()) |
|
428 | 441 | st.rename() |
|
429 | 442 | self._dirty = self._dirtypl = False |
|
430 | 443 | |
|
431 | 444 | def _dirignore(self, f): |
|
432 | 445 | if f == '.': |
|
433 | 446 | return False |
|
434 | 447 | if self._ignore(f): |
|
435 | 448 | return True |
|
436 | 449 | for p in _finddirs(f): |
|
437 | 450 | if self._ignore(p): |
|
438 | 451 | return True |
|
439 | 452 | return False |
|
440 | 453 | |
|
441 | 454 | def walk(self, match, subrepos, unknown, ignored): |
|
442 | 455 | ''' |
|
443 | 456 | Walk recursively through the directory tree, finding all files |
|
444 | 457 | matched by match. |
|
445 | 458 | |
|
446 | 459 | Return a dict mapping filename to stat-like object (either |
|
447 | 460 | mercurial.osutil.stat instance or return value of os.stat()). |
|
448 | 461 | ''' |
|
449 | 462 | |
|
450 | 463 | def fwarn(f, msg): |
|
451 | 464 | self._ui.warn('%s: %s\n' % (self.pathto(f), msg)) |
|
452 | 465 | return False |
|
453 | 466 | |
|
454 | 467 | def badtype(mode): |
|
455 | 468 | kind = _('unknown') |
|
456 | 469 | if stat.S_ISCHR(mode): |
|
457 | 470 | kind = _('character device') |
|
458 | 471 | elif stat.S_ISBLK(mode): |
|
459 | 472 | kind = _('block device') |
|
460 | 473 | elif stat.S_ISFIFO(mode): |
|
461 | 474 | kind = _('fifo') |
|
462 | 475 | elif stat.S_ISSOCK(mode): |
|
463 | 476 | kind = _('socket') |
|
464 | 477 | elif stat.S_ISDIR(mode): |
|
465 | 478 | kind = _('directory') |
|
466 | 479 | return _('unsupported file type (type is %s)') % kind |
|
467 | 480 | |
|
468 | 481 | ignore = self._ignore |
|
469 | 482 | dirignore = self._dirignore |
|
470 | 483 | if ignored: |
|
471 | 484 | ignore = util.never |
|
472 | 485 | dirignore = util.never |
|
473 | 486 | elif not unknown: |
|
474 | 487 | # if unknown and ignored are False, skip step 2 |
|
475 | 488 | ignore = util.always |
|
476 | 489 | dirignore = util.always |
|
477 | 490 | |
|
478 | 491 | matchfn = match.matchfn |
|
479 | 492 | badfn = match.bad |
|
480 | 493 | dmap = self._map |
|
481 | 494 | normpath = util.normpath |
|
482 | 495 | listdir = osutil.listdir |
|
483 | 496 | lstat = os.lstat |
|
484 | 497 | getkind = stat.S_IFMT |
|
485 | 498 | dirkind = stat.S_IFDIR |
|
486 | 499 | regkind = stat.S_IFREG |
|
487 | 500 | lnkkind = stat.S_IFLNK |
|
488 | 501 | join = self._join |
|
489 | 502 | work = [] |
|
490 | 503 | wadd = work.append |
|
491 | 504 | |
|
492 | 505 | exact = skipstep3 = False |
|
493 | 506 | if matchfn == match.exact: # match.exact |
|
494 | 507 | exact = True |
|
495 | 508 | dirignore = util.always # skip step 2 |
|
496 | 509 | elif match.files() and not match.anypats(): # match.match, no patterns |
|
497 | 510 | skipstep3 = True |
|
498 | 511 | |
|
499 | 512 | if self._checkcase: |
|
500 | 513 | normalize = self._normalize |
|
501 | 514 | skipstep3 = False |
|
502 | 515 | else: |
|
503 | 516 | normalize = lambda x, y: x |
|
504 | 517 | |
|
505 | 518 | files = sorted(match.files()) |
|
506 | 519 | subrepos.sort() |
|
507 | 520 | i, j = 0, 0 |
|
508 | 521 | while i < len(files) and j < len(subrepos): |
|
509 | 522 | subpath = subrepos[j] + "/" |
|
510 | 523 | if files[i] < subpath: |
|
511 | 524 | i += 1 |
|
512 | 525 | continue |
|
513 | 526 | while i < len(files) and files[i].startswith(subpath): |
|
514 | 527 | del files[i] |
|
515 | 528 | j += 1 |
|
516 | 529 | |
|
517 | 530 | if not files or '.' in files: |
|
518 | 531 | files = [''] |
|
519 | 532 | results = dict.fromkeys(subrepos) |
|
520 | 533 | results['.hg'] = None |
|
521 | 534 | |
|
522 | 535 | # step 1: find all explicit files |
|
523 | 536 | for ff in files: |
|
524 | 537 | nf = normalize(normpath(ff), False) |
|
525 | 538 | if nf in results: |
|
526 | 539 | continue |
|
527 | 540 | |
|
528 | 541 | try: |
|
529 | 542 | st = lstat(join(nf)) |
|
530 | 543 | kind = getkind(st.st_mode) |
|
531 | 544 | if kind == dirkind: |
|
532 | 545 | skipstep3 = False |
|
533 | 546 | if nf in dmap: |
|
534 | 547 | #file deleted on disk but still in dirstate |
|
535 | 548 | results[nf] = None |
|
536 | 549 | match.dir(nf) |
|
537 | 550 | if not dirignore(nf): |
|
538 | 551 | wadd(nf) |
|
539 | 552 | elif kind == regkind or kind == lnkkind: |
|
540 | 553 | results[nf] = st |
|
541 | 554 | else: |
|
542 | 555 | badfn(ff, badtype(kind)) |
|
543 | 556 | if nf in dmap: |
|
544 | 557 | results[nf] = None |
|
545 | 558 | except OSError, inst: |
|
546 | 559 | if nf in dmap: # does it exactly match a file? |
|
547 | 560 | results[nf] = None |
|
548 | 561 | else: # does it match a directory? |
|
549 | 562 | prefix = nf + "/" |
|
550 | 563 | for fn in dmap: |
|
551 | 564 | if fn.startswith(prefix): |
|
552 | 565 | match.dir(nf) |
|
553 | 566 | skipstep3 = False |
|
554 | 567 | break |
|
555 | 568 | else: |
|
556 | 569 | badfn(ff, inst.strerror) |
|
557 | 570 | |
|
558 | 571 | # step 2: visit subdirectories |
|
559 | 572 | while work: |
|
560 | 573 | nd = work.pop() |
|
561 | 574 | skip = None |
|
562 | 575 | if nd == '.': |
|
563 | 576 | nd = '' |
|
564 | 577 | else: |
|
565 | 578 | skip = '.hg' |
|
566 | 579 | try: |
|
567 | 580 | entries = listdir(join(nd), stat=True, skip=skip) |
|
568 | 581 | except OSError, inst: |
|
569 | 582 | if inst.errno == errno.EACCES: |
|
570 | 583 | fwarn(nd, inst.strerror) |
|
571 | 584 | continue |
|
572 | 585 | raise |
|
573 | 586 | for f, kind, st in entries: |
|
574 | 587 | nf = normalize(nd and (nd + "/" + f) or f, True) |
|
575 | 588 | if nf not in results: |
|
576 | 589 | if kind == dirkind: |
|
577 | 590 | if not ignore(nf): |
|
578 | 591 | match.dir(nf) |
|
579 | 592 | wadd(nf) |
|
580 | 593 | if nf in dmap and matchfn(nf): |
|
581 | 594 | results[nf] = None |
|
582 | 595 | elif kind == regkind or kind == lnkkind: |
|
583 | 596 | if nf in dmap: |
|
584 | 597 | if matchfn(nf): |
|
585 | 598 | results[nf] = st |
|
586 | 599 | elif matchfn(nf) and not ignore(nf): |
|
587 | 600 | results[nf] = st |
|
588 | 601 | elif nf in dmap and matchfn(nf): |
|
589 | 602 | results[nf] = None |
|
590 | 603 | |
|
591 | 604 | # step 3: report unseen items in the dmap hash |
|
592 | 605 | if not skipstep3 and not exact: |
|
593 | 606 | visit = sorted([f for f in dmap if f not in results and matchfn(f)]) |
|
594 | 607 | for nf, st in zip(visit, util.statfiles([join(i) for i in visit])): |
|
595 | 608 | if not st is None and not getkind(st.st_mode) in (regkind, lnkkind): |
|
596 | 609 | st = None |
|
597 | 610 | results[nf] = st |
|
598 | 611 | for s in subrepos: |
|
599 | 612 | del results[s] |
|
600 | 613 | del results['.hg'] |
|
601 | 614 | return results |
|
602 | 615 | |
|
603 | 616 | def status(self, match, subrepos, ignored, clean, unknown): |
|
604 | 617 | '''Determine the status of the working copy relative to the |
|
605 | 618 | dirstate and return a tuple of lists (unsure, modified, added, |
|
606 | 619 | removed, deleted, unknown, ignored, clean), where: |
|
607 | 620 | |
|
608 | 621 | unsure: |
|
609 | 622 | files that might have been modified since the dirstate was |
|
610 | 623 | written, but need to be read to be sure (size is the same |
|
611 | 624 | but mtime differs) |
|
612 | 625 | modified: |
|
613 | 626 | files that have definitely been modified since the dirstate |
|
614 | 627 | was written (different size or mode) |
|
615 | 628 | added: |
|
616 | 629 | files that have been explicitly added with hg add |
|
617 | 630 | removed: |
|
618 | 631 | files that have been explicitly removed with hg remove |
|
619 | 632 | deleted: |
|
620 | 633 | files that have been deleted through other means ("missing") |
|
621 | 634 | unknown: |
|
622 | 635 | files not in the dirstate that are not ignored |
|
623 | 636 | ignored: |
|
624 | 637 | files not in the dirstate that are ignored |
|
625 | 638 | (by _dirignore()) |
|
626 | 639 | clean: |
|
627 | 640 | files that have definitely not been modified since the |
|
628 | 641 | dirstate was written |
|
629 | 642 | ''' |
|
630 | 643 | listignored, listclean, listunknown = ignored, clean, unknown |
|
631 | 644 | lookup, modified, added, unknown, ignored = [], [], [], [], [] |
|
632 | 645 | removed, deleted, clean = [], [], [] |
|
633 | 646 | |
|
634 | 647 | dmap = self._map |
|
635 | 648 | ladd = lookup.append # aka "unsure" |
|
636 | 649 | madd = modified.append |
|
637 | 650 | aadd = added.append |
|
638 | 651 | uadd = unknown.append |
|
639 | 652 | iadd = ignored.append |
|
640 | 653 | radd = removed.append |
|
641 | 654 | dadd = deleted.append |
|
642 | 655 | cadd = clean.append |
|
656 | lastnormal = self._lastnormal.__contains__ | |
|
643 | 657 | |
|
644 | 658 | lnkkind = stat.S_IFLNK |
|
645 | 659 | |
|
646 | 660 | for fn, st in self.walk(match, subrepos, listunknown, |
|
647 | 661 | listignored).iteritems(): |
|
648 | 662 | if fn not in dmap: |
|
649 | 663 | if (listignored or match.exact(fn)) and self._dirignore(fn): |
|
650 | 664 | if listignored: |
|
651 | 665 | iadd(fn) |
|
652 | 666 | elif listunknown: |
|
653 | 667 | uadd(fn) |
|
654 | 668 | continue |
|
655 | 669 | |
|
656 | 670 | state, mode, size, time = dmap[fn] |
|
657 | 671 | |
|
658 | 672 | if not st and state in "nma": |
|
659 | 673 | dadd(fn) |
|
660 | 674 | elif state == 'n': |
|
661 | 675 | # The "mode & lnkkind != lnkkind or self._checklink" |
|
662 | 676 | # lines are an expansion of "islink => checklink" |
|
663 | 677 | # where islink means "is this a link?" and checklink |
|
664 | 678 | # means "can we check links?". |
|
665 | 679 | if (size >= 0 and |
|
666 | 680 | (size != st.st_size |
|
667 | 681 | or ((mode ^ st.st_mode) & 0100 and self._checkexec)) |
|
668 | 682 | and (mode & lnkkind != lnkkind or self._checklink) |
|
669 | 683 | or size == -2 # other parent |
|
670 | 684 | or fn in self._copymap): |
|
671 | 685 | madd(fn) |
|
672 | 686 | elif (time != int(st.st_mtime) |
|
673 | 687 | and (mode & lnkkind != lnkkind or self._checklink)): |
|
674 | 688 | ladd(fn) |
|
689 | elif lastnormal(fn): | |
|
690 | # If previously in this process we recorded that | |
|
691 | # this file is clean, think twice: intervening code | |
|
692 | # may have modified the file in the same second | |
|
693 | # without changing its size. So force caller to | |
|
694 | # check file contents. Because we're not updating | |
|
695 | # self._map, this only affects the current process. | |
|
696 | # That should be OK because this mainly affects | |
|
697 | # multiple commits in the same process, and each | |
|
698 | # commit by definition makes the committed files | |
|
699 | # clean. | |
|
700 | ladd(fn) | |
|
675 | 701 | elif listclean: |
|
676 | 702 | cadd(fn) |
|
677 | 703 | elif state == 'm': |
|
678 | 704 | madd(fn) |
|
679 | 705 | elif state == 'a': |
|
680 | 706 | aadd(fn) |
|
681 | 707 | elif state == 'r': |
|
682 | 708 | radd(fn) |
|
683 | 709 | |
|
684 | 710 | return (lookup, modified, added, removed, deleted, unknown, ignored, |
|
685 | 711 | clean) |
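
The comments added to normal() and status() above both describe the same limitation: the dirstate cache records only a file's size and whole-second mtime, so a second, size-preserving write that lands in the same second is invisible to a pure stat comparison. That is why files remembered in _lastnormal are pushed back onto the "unsure" (lookup) list instead of being reported clean. A rough standalone illustration of the limitation, in plain Python rather than Mercurial's API (the temporary file and the helper names here are made up for the example):

    import os, tempfile

    def snapshot(path):
        # the (size, whole-second mtime) pair a naive stat cache would store
        st = os.stat(path)
        return (st.st_size, int(st.st_mtime))

    d = tempfile.mkdtemp()
    path = os.path.join(d, "file1")     # hypothetical file, as in the test above

    f = open(path, "wb")
    f.write(b"x")
    f.close()
    cached = snapshot(path)             # the cache now considers the file clean

    f = open(path, "rb+")               # size-preserving modification, normally
    f.write(b"y")                       # landing within the same second
    f.close()

    stale = snapshot(path) == cached    # True unless the clock happened to tick over
    print("stat cache still thinks the file is clean: %r" % stale)

Within a single process the patch compensates by tracking such files in _lastnormal and forcing status() to re-read their contents; across processes, the existing "mtime == now" invalidation in write() already covers the same window.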