@@ -0,0 +1,43 | |||
|
1 | run only on case-insensitive filesystems, because the collision check at | |
|
2 | "hg update" is done only on case-insensitive filesystems | |
|
3 | ||
|
4 | $ "$TESTDIR/hghave" icasefs || exit 80 | |
|
5 | ||
|
6 | setup repository, and target files | |
|
7 | ||
|
8 | $ HGENCODING=cp932 | |
|
9 | $ export HGENCODING | |
|
10 | $ hg init t | |
|
11 | $ cd t | |
|
12 | $ python << EOF | |
|
13 | > names = ["\x83\x41", # cp932(0x83, 0x41='A'), UNICODE(0x30a2) | |
|
14 | > "\x83\x5A", # cp932(0x83, 0x5A='Z'), UNICODE(0x30bb) | |
|
15 | > "\x83\x61", # cp932(0x83, 0x61='a'), UNICODE(0x30c2) | |
|
16 | > "\x83\x7A", # cp932(0x83, 0x7A='z'), UNICODE(0x30db) | |
|
17 | > ] | |
|
18 | > for num, name in zip(range(len(names)), names): | |
|
19 | > # file for getting target filename of "hg add" | |
|
20 | > f = file(str(num), 'w'); f.write(name); f.close() | |
|
21 | > # target file of "hg add" | |
|
22 | > f = file(name, 'w'); f.write(name); f.close() | |
|
23 | > EOF | |
|
24 | ||
|
25 | test filename collision check at "hg add" | |
|
26 | ||
|
27 | $ hg add --config ui.portablefilenames=abort `cat 0` | |
|
28 | $ hg add --config ui.portablefilenames=abort `cat 1` | |
|
29 | $ hg add --config ui.portablefilenames=abort `cat 2` | |
|
30 | $ hg add --config ui.portablefilenames=abort `cat 3` | |
|
31 | $ hg status -a | |
|
32 | A \x83A (esc) | |
|
33 | A \x83Z (esc) | |
|
34 | A \x83a (esc) | |
|
35 | A \x83z (esc) | |
|
36 | ||
|
37 | test filename collision check at "hg update" | |
|
38 | ||
|
39 | $ hg commit -m 'revision 0' | |
|
40 | $ hg update null | |
|
41 | 0 files updated, 0 files merged, 4 files removed, 0 files unresolved | |
|
42 | $ hg update tip | |
|
43 | 4 files updated, 0 files merged, 0 files removed, 0 files unresolved |
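A minimal sketch (separate from the patch, assuming only Python's standard cp932 codec) of why the test's filenames are chosen this way: the second byte of each cp932 name falls in the ASCII letter range, so a bytewise lower() rewrites one character into a different one, while decoding first keeps the names distinct. That is the behaviour the encoding-aware folding in this changeset relies on.

```python
# Why the test's filenames are tricky: in cp932 the trailing byte of each
# name looks like an ASCII letter, so bytewise case folding corrupts it.
name_a = b"\x83\x41"   # cp932 for U+30A2
name_b = b"\x83\x61"   # cp932 for U+30C2, a different character

# Bytewise folding turns 0x41 ('A') into 0x61 ('a'): the two names collide.
assert name_a.lower() == name_b

# Decoding first keeps them distinct; neither character has a lowercase form.
assert name_a.decode("cp932").lower() != name_b.decode("cp932").lower()
```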
@@ -1,566 +1,566 | |||
|
1 | 1 | # merge.py - directory-level update/merge handling for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from node import nullid, nullrev, hex, bin |
|
9 | 9 | from i18n import _ |
|
10 | import scmutil, util, filemerge, copies, subrepo | |
|
10 | import scmutil, util, filemerge, copies, subrepo, encoding | |
|
11 | 11 | import errno, os, shutil |
|
12 | 12 | |
|
13 | 13 | class mergestate(object): |
|
14 | 14 | '''track 3-way merge state of individual files''' |
|
15 | 15 | def __init__(self, repo): |
|
16 | 16 | self._repo = repo |
|
17 | 17 | self._dirty = False |
|
18 | 18 | self._read() |
|
19 | 19 | def reset(self, node=None): |
|
20 | 20 | self._state = {} |
|
21 | 21 | if node: |
|
22 | 22 | self._local = node |
|
23 | 23 | shutil.rmtree(self._repo.join("merge"), True) |
|
24 | 24 | self._dirty = False |
|
25 | 25 | def _read(self): |
|
26 | 26 | self._state = {} |
|
27 | 27 | try: |
|
28 | 28 | f = self._repo.opener("merge/state") |
|
29 | 29 | for i, l in enumerate(f): |
|
30 | 30 | if i == 0: |
|
31 | 31 | self._local = bin(l[:-1]) |
|
32 | 32 | else: |
|
33 | 33 | bits = l[:-1].split("\0") |
|
34 | 34 | self._state[bits[0]] = bits[1:] |
|
35 | 35 | f.close() |
|
36 | 36 | except IOError, err: |
|
37 | 37 | if err.errno != errno.ENOENT: |
|
38 | 38 | raise |
|
39 | 39 | self._dirty = False |
|
40 | 40 | def commit(self): |
|
41 | 41 | if self._dirty: |
|
42 | 42 | f = self._repo.opener("merge/state", "w") |
|
43 | 43 | f.write(hex(self._local) + "\n") |
|
44 | 44 | for d, v in self._state.iteritems(): |
|
45 | 45 | f.write("\0".join([d] + v) + "\n") |
|
46 | 46 | f.close() |
|
47 | 47 | self._dirty = False |
|
48 | 48 | def add(self, fcl, fco, fca, fd, flags): |
|
49 | 49 | hash = util.sha1(fcl.path()).hexdigest() |
|
50 | 50 | self._repo.opener.write("merge/" + hash, fcl.data()) |
|
51 | 51 | self._state[fd] = ['u', hash, fcl.path(), fca.path(), |
|
52 | 52 | hex(fca.filenode()), fco.path(), flags] |
|
53 | 53 | self._dirty = True |
|
54 | 54 | def __contains__(self, dfile): |
|
55 | 55 | return dfile in self._state |
|
56 | 56 | def __getitem__(self, dfile): |
|
57 | 57 | return self._state[dfile][0] |
|
58 | 58 | def __iter__(self): |
|
59 | 59 | l = self._state.keys() |
|
60 | 60 | l.sort() |
|
61 | 61 | for f in l: |
|
62 | 62 | yield f |
|
63 | 63 | def mark(self, dfile, state): |
|
64 | 64 | self._state[dfile][0] = state |
|
65 | 65 | self._dirty = True |
|
66 | 66 | def resolve(self, dfile, wctx, octx): |
|
67 | 67 | if self[dfile] == 'r': |
|
68 | 68 | return 0 |
|
69 | 69 | state, hash, lfile, afile, anode, ofile, flags = self._state[dfile] |
|
70 | 70 | f = self._repo.opener("merge/" + hash) |
|
71 | 71 | self._repo.wwrite(dfile, f.read(), flags) |
|
72 | 72 | f.close() |
|
73 | 73 | fcd = wctx[dfile] |
|
74 | 74 | fco = octx[ofile] |
|
75 | 75 | fca = self._repo.filectx(afile, fileid=anode) |
|
76 | 76 | r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca) |
|
77 | 77 | if r is None: |
|
78 | 78 | # no real conflict |
|
79 | 79 | del self._state[dfile] |
|
80 | 80 | elif not r: |
|
81 | 81 | self.mark(dfile, 'r') |
|
82 | 82 | return r |
|
83 | 83 | |
|
84 | 84 | def _checkunknown(wctx, mctx): |
|
85 | 85 | "check for collisions between unknown files and files in mctx" |
|
86 | 86 | for f in wctx.unknown(): |
|
87 | 87 | if f in mctx and mctx[f].cmp(wctx[f]): |
|
88 | 88 | raise util.Abort(_("untracked file in working directory differs" |
|
89 | 89 | " from file in requested revision: '%s'") % f) |
|
90 | 90 | |
|
91 | 91 | def _checkcollision(mctx): |
|
92 | 92 | "check for case folding collisions in the destination context" |
|
93 | 93 | folded = {} |
|
94 | 94 | for fn in mctx: |
|
95 | fold = fn.lower() | |
|
95 | fold = encoding.lower(fn) | |
|
96 | 96 | if fold in folded: |
|
97 | 97 | raise util.Abort(_("case-folding collision between %s and %s") |
|
98 | 98 | % (fn, folded[fold])) |
|
99 | 99 | folded[fold] = fn |
|
100 | 100 | |
|
101 | 101 | def _forgetremoved(wctx, mctx, branchmerge): |
|
102 | 102 | """ |
|
103 | 103 | Forget removed files |
|
104 | 104 | |
|
105 | 105 | If we're jumping between revisions (as opposed to merging), and if |
|
106 | 106 | neither the working directory nor the target rev has the file, |
|
107 | 107 | then we need to remove it from the dirstate, to prevent the |
|
108 | 108 | dirstate from listing the file when it is no longer in the |
|
109 | 109 | manifest. |
|
110 | 110 | |
|
111 | 111 | If we're merging, and the other revision has removed a file |
|
112 | 112 | that is not present in the working directory, we need to mark it |
|
113 | 113 | as removed. |
|
114 | 114 | """ |
|
115 | 115 | |
|
116 | 116 | action = [] |
|
117 | 117 | state = branchmerge and 'r' or 'f' |
|
118 | 118 | for f in wctx.deleted(): |
|
119 | 119 | if f not in mctx: |
|
120 | 120 | action.append((f, state)) |
|
121 | 121 | |
|
122 | 122 | if not branchmerge: |
|
123 | 123 | for f in wctx.removed(): |
|
124 | 124 | if f not in mctx: |
|
125 | 125 | action.append((f, "f")) |
|
126 | 126 | |
|
127 | 127 | return action |
|
128 | 128 | |
|
129 | 129 | def manifestmerge(repo, p1, p2, pa, overwrite, partial): |
|
130 | 130 | """ |
|
131 | 131 | Merge p1 and p2 with ancestor pa and generate merge action list |
|
132 | 132 | |
|
133 | 133 | overwrite = whether we clobber working files |
|
134 | 134 | partial = function to filter file lists |
|
135 | 135 | """ |
|
136 | 136 | |
|
137 | 137 | def fmerge(f, f2, fa): |
|
138 | 138 | """merge flags""" |
|
139 | 139 | a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2) |
|
140 | 140 | if m == n: # flags agree |
|
141 | 141 | return m # unchanged |
|
142 | 142 | if m and n and not a: # flags set, don't agree, differ from parent |
|
143 | 143 | r = repo.ui.promptchoice( |
|
144 | 144 | _(" conflicting flags for %s\n" |
|
145 | 145 | "(n)one, e(x)ec or sym(l)ink?") % f, |
|
146 | 146 | (_("&None"), _("E&xec"), _("Sym&link")), 0) |
|
147 | 147 | if r == 1: |
|
148 | 148 | return "x" # Exec |
|
149 | 149 | if r == 2: |
|
150 | 150 | return "l" # Symlink |
|
151 | 151 | return "" |
|
152 | 152 | if m and m != a: # changed from a to m |
|
153 | 153 | return m |
|
154 | 154 | if n and n != a: # changed from a to n |
|
155 | 155 | return n |
|
156 | 156 | return '' # flag was cleared |
|
157 | 157 | |
|
158 | 158 | def act(msg, m, f, *args): |
|
159 | 159 | repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m)) |
|
160 | 160 | action.append((f, m) + args) |
|
161 | 161 | |
|
162 | 162 | action, copy = [], {} |
|
163 | 163 | |
|
164 | 164 | if overwrite: |
|
165 | 165 | pa = p1 |
|
166 | 166 | elif pa == p2: # backwards |
|
167 | 167 | pa = p1.p1() |
|
168 | 168 | elif pa and repo.ui.configbool("merge", "followcopies", True): |
|
169 | 169 | dirs = repo.ui.configbool("merge", "followdirs", True) |
|
170 | 170 | copy, diverge = copies.copies(repo, p1, p2, pa, dirs) |
|
171 | 171 | for of, fl in diverge.iteritems(): |
|
172 | 172 | act("divergent renames", "dr", of, fl) |
|
173 | 173 | |
|
174 | 174 | repo.ui.note(_("resolving manifests\n")) |
|
175 | 175 | repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial))) |
|
176 | 176 | repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2)) |
|
177 | 177 | |
|
178 | 178 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() |
|
179 | 179 | copied = set(copy.values()) |
|
180 | 180 | |
|
181 | 181 | if '.hgsubstate' in m1: |
|
182 | 182 | # check whether sub state is modified |
|
183 | 183 | for s in p1.substate: |
|
184 | 184 | if p1.sub(s).dirty(): |
|
185 | 185 | m1['.hgsubstate'] += "+" |
|
186 | 186 | break |
|
187 | 187 | |
|
188 | 188 | # Compare manifests |
|
189 | 189 | for f, n in m1.iteritems(): |
|
190 | 190 | if partial and not partial(f): |
|
191 | 191 | continue |
|
192 | 192 | if f in m2: |
|
193 | 193 | rflags = fmerge(f, f, f) |
|
194 | 194 | a = ma.get(f, nullid) |
|
195 | 195 | if n == m2[f] or m2[f] == a: # same or local newer |
|
196 | 196 | # is file locally modified or flags need changing? |
|
197 | 197 | # dirstate flags may need to be made current |
|
198 | 198 | if m1.flags(f) != rflags or n[20:]: |
|
199 | 199 | act("update permissions", "e", f, rflags) |
|
200 | 200 | elif n == a: # remote newer |
|
201 | 201 | act("remote is newer", "g", f, rflags) |
|
202 | 202 | else: # both changed |
|
203 | 203 | act("versions differ", "m", f, f, f, rflags, False) |
|
204 | 204 | elif f in copied: # files we'll deal with on m2 side |
|
205 | 205 | pass |
|
206 | 206 | elif f in copy: |
|
207 | 207 | f2 = copy[f] |
|
208 | 208 | if f2 not in m2: # directory rename |
|
209 | 209 | act("remote renamed directory to " + f2, "d", |
|
210 | 210 | f, None, f2, m1.flags(f)) |
|
211 | 211 | else: # case 2 A,B/B/B or case 4,21 A/B/B |
|
212 | 212 | act("local copied/moved to " + f2, "m", |
|
213 | 213 | f, f2, f, fmerge(f, f2, f2), False) |
|
214 | 214 | elif f in ma: # clean, a different, no remote |
|
215 | 215 | if n != ma[f]: |
|
216 | 216 | if repo.ui.promptchoice( |
|
217 | 217 | _(" local changed %s which remote deleted\n" |
|
218 | 218 | "use (c)hanged version or (d)elete?") % f, |
|
219 | 219 | (_("&Changed"), _("&Delete")), 0): |
|
220 | 220 | act("prompt delete", "r", f) |
|
221 | 221 | else: |
|
222 | 222 | act("prompt keep", "a", f) |
|
223 | 223 | elif n[20:] == "a": # added, no remote |
|
224 | 224 | act("remote deleted", "f", f) |
|
225 | 225 | elif n[20:] != "u": |
|
226 | 226 | act("other deleted", "r", f) |
|
227 | 227 | |
|
228 | 228 | for f, n in m2.iteritems(): |
|
229 | 229 | if partial and not partial(f): |
|
230 | 230 | continue |
|
231 | 231 | if f in m1 or f in copied: # files already visited |
|
232 | 232 | continue |
|
233 | 233 | if f in copy: |
|
234 | 234 | f2 = copy[f] |
|
235 | 235 | if f2 not in m1: # directory rename |
|
236 | 236 | act("local renamed directory to " + f2, "d", |
|
237 | 237 | None, f, f2, m2.flags(f)) |
|
238 | 238 | elif f2 in m2: # rename case 1, A/A,B/A |
|
239 | 239 | act("remote copied to " + f, "m", |
|
240 | 240 | f2, f, f, fmerge(f2, f, f2), False) |
|
241 | 241 | else: # case 3,20 A/B/A |
|
242 | 242 | act("remote moved to " + f, "m", |
|
243 | 243 | f2, f, f, fmerge(f2, f, f2), True) |
|
244 | 244 | elif f not in ma: |
|
245 | 245 | act("remote created", "g", f, m2.flags(f)) |
|
246 | 246 | elif n != ma[f]: |
|
247 | 247 | if repo.ui.promptchoice( |
|
248 | 248 | _("remote changed %s which local deleted\n" |
|
249 | 249 | "use (c)hanged version or leave (d)eleted?") % f, |
|
250 | 250 | (_("&Changed"), _("&Deleted")), 0) == 0: |
|
251 | 251 | act("prompt recreating", "g", f, m2.flags(f)) |
|
252 | 252 | |
|
253 | 253 | return action |
|
254 | 254 | |
|
255 | 255 | def actionkey(a): |
|
256 | 256 | return a[1] == 'r' and -1 or 0, a |
|
257 | 257 | |
|
258 | 258 | def applyupdates(repo, action, wctx, mctx, actx, overwrite): |
|
259 | 259 | """apply the merge action list to the working directory |
|
260 | 260 | |
|
261 | 261 | wctx is the working copy context |
|
262 | 262 | mctx is the context to be merged into the working copy |
|
263 | 263 | actx is the context of the common ancestor |
|
264 | 264 | |
|
265 | 265 | Return a tuple of counts (updated, merged, removed, unresolved) that |
|
266 | 266 | describes how many files were affected by the update. |
|
267 | 267 | """ |
|
268 | 268 | |
|
269 | 269 | updated, merged, removed, unresolved = 0, 0, 0, 0 |
|
270 | 270 | ms = mergestate(repo) |
|
271 | 271 | ms.reset(wctx.p1().node()) |
|
272 | 272 | moves = [] |
|
273 | 273 | action.sort(key=actionkey) |
|
274 | 274 | |
|
275 | 275 | # prescan for merges |
|
276 | 276 | u = repo.ui |
|
277 | 277 | for a in action: |
|
278 | 278 | f, m = a[:2] |
|
279 | 279 | if m == 'm': # merge |
|
280 | 280 | f2, fd, flags, move = a[2:] |
|
281 | 281 | if f == '.hgsubstate': # merged internally |
|
282 | 282 | continue |
|
283 | 283 | repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd)) |
|
284 | 284 | fcl = wctx[f] |
|
285 | 285 | fco = mctx[f2] |
|
286 | 286 | if mctx == actx: # backwards, use working dir parent as ancestor |
|
287 | 287 | if fcl.parents(): |
|
288 | 288 | fca = fcl.p1() |
|
289 | 289 | else: |
|
290 | 290 | fca = repo.filectx(f, fileid=nullrev) |
|
291 | 291 | else: |
|
292 | 292 | fca = fcl.ancestor(fco, actx) |
|
293 | 293 | if not fca: |
|
294 | 294 | fca = repo.filectx(f, fileid=nullrev) |
|
295 | 295 | ms.add(fcl, fco, fca, fd, flags) |
|
296 | 296 | if f != fd and move: |
|
297 | 297 | moves.append(f) |
|
298 | 298 | |
|
299 | 299 | audit = scmutil.pathauditor(repo.root) |
|
300 | 300 | |
|
301 | 301 | # remove renamed files after safely stored |
|
302 | 302 | for f in moves: |
|
303 | 303 | if os.path.lexists(repo.wjoin(f)): |
|
304 | 304 | repo.ui.debug("removing %s\n" % f) |
|
305 | 305 | audit(f) |
|
306 | 306 | os.unlink(repo.wjoin(f)) |
|
307 | 307 | |
|
308 | 308 | numupdates = len(action) |
|
309 | 309 | for i, a in enumerate(action): |
|
310 | 310 | f, m = a[:2] |
|
311 | 311 | u.progress(_('updating'), i + 1, item=f, total=numupdates, |
|
312 | 312 | unit=_('files')) |
|
313 | 313 | if f and f[0] == "/": |
|
314 | 314 | continue |
|
315 | 315 | if m == "r": # remove |
|
316 | 316 | repo.ui.note(_("removing %s\n") % f) |
|
317 | 317 | audit(f) |
|
318 | 318 | if f == '.hgsubstate': # subrepo states need updating |
|
319 | 319 | subrepo.submerge(repo, wctx, mctx, wctx, overwrite) |
|
320 | 320 | try: |
|
321 | 321 | util.unlinkpath(repo.wjoin(f)) |
|
322 | 322 | except OSError, inst: |
|
323 | 323 | if inst.errno != errno.ENOENT: |
|
324 | 324 | repo.ui.warn(_("update failed to remove %s: %s!\n") % |
|
325 | 325 | (f, inst.strerror)) |
|
326 | 326 | removed += 1 |
|
327 | 327 | elif m == "m": # merge |
|
328 | 328 | if f == '.hgsubstate': # subrepo states need updating |
|
329 | 329 | subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite) |
|
330 | 330 | continue |
|
331 | 331 | f2, fd, flags, move = a[2:] |
|
332 | 332 | repo.wopener.audit(fd) |
|
333 | 333 | r = ms.resolve(fd, wctx, mctx) |
|
334 | 334 | if r is not None and r > 0: |
|
335 | 335 | unresolved += 1 |
|
336 | 336 | else: |
|
337 | 337 | if r is None: |
|
338 | 338 | updated += 1 |
|
339 | 339 | else: |
|
340 | 340 | merged += 1 |
|
341 | 341 | util.setflags(repo.wjoin(fd), 'l' in flags, 'x' in flags) |
|
342 | 342 | if (move and repo.dirstate.normalize(fd) != f |
|
343 | 343 | and os.path.lexists(repo.wjoin(f))): |
|
344 | 344 | repo.ui.debug("removing %s\n" % f) |
|
345 | 345 | audit(f) |
|
346 | 346 | os.unlink(repo.wjoin(f)) |
|
347 | 347 | elif m == "g": # get |
|
348 | 348 | flags = a[2] |
|
349 | 349 | repo.ui.note(_("getting %s\n") % f) |
|
350 | 350 | t = mctx.filectx(f).data() |
|
351 | 351 | repo.wwrite(f, t, flags) |
|
352 | 352 | t = None |
|
353 | 353 | updated += 1 |
|
354 | 354 | if f == '.hgsubstate': # subrepo states need updating |
|
355 | 355 | subrepo.submerge(repo, wctx, mctx, wctx, overwrite) |
|
356 | 356 | elif m == "d": # directory rename |
|
357 | 357 | f2, fd, flags = a[2:] |
|
358 | 358 | if f: |
|
359 | 359 | repo.ui.note(_("moving %s to %s\n") % (f, fd)) |
|
360 | 360 | audit(f) |
|
361 | 361 | t = wctx.filectx(f).data() |
|
362 | 362 | repo.wwrite(fd, t, flags) |
|
363 | 363 | util.unlinkpath(repo.wjoin(f)) |
|
364 | 364 | if f2: |
|
365 | 365 | repo.ui.note(_("getting %s to %s\n") % (f2, fd)) |
|
366 | 366 | t = mctx.filectx(f2).data() |
|
367 | 367 | repo.wwrite(fd, t, flags) |
|
368 | 368 | updated += 1 |
|
369 | 369 | elif m == "dr": # divergent renames |
|
370 | 370 | fl = a[2] |
|
371 | 371 | repo.ui.warn(_("note: possible conflict - %s was renamed " |
|
372 | 372 | "multiple times to:\n") % f) |
|
373 | 373 | for nf in fl: |
|
374 | 374 | repo.ui.warn(" %s\n" % nf) |
|
375 | 375 | elif m == "e": # exec |
|
376 | 376 | flags = a[2] |
|
377 | 377 | repo.wopener.audit(f) |
|
378 | 378 | util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags) |
|
379 | 379 | ms.commit() |
|
380 | 380 | u.progress(_('updating'), None, total=numupdates, unit=_('files')) |
|
381 | 381 | |
|
382 | 382 | return updated, merged, removed, unresolved |
|
383 | 383 | |
|
384 | 384 | def recordupdates(repo, action, branchmerge): |
|
385 | 385 | "record merge actions to the dirstate" |
|
386 | 386 | |
|
387 | 387 | for a in action: |
|
388 | 388 | f, m = a[:2] |
|
389 | 389 | if m == "r": # remove |
|
390 | 390 | if branchmerge: |
|
391 | 391 | repo.dirstate.remove(f) |
|
392 | 392 | else: |
|
393 | 393 | repo.dirstate.drop(f) |
|
394 | 394 | elif m == "a": # re-add |
|
395 | 395 | if not branchmerge: |
|
396 | 396 | repo.dirstate.add(f) |
|
397 | 397 | elif m == "f": # forget |
|
398 | 398 | repo.dirstate.drop(f) |
|
399 | 399 | elif m == "e": # exec change |
|
400 | 400 | repo.dirstate.normallookup(f) |
|
401 | 401 | elif m == "g": # get |
|
402 | 402 | if branchmerge: |
|
403 | 403 | repo.dirstate.otherparent(f) |
|
404 | 404 | else: |
|
405 | 405 | repo.dirstate.normal(f) |
|
406 | 406 | elif m == "m": # merge |
|
407 | 407 | f2, fd, flag, move = a[2:] |
|
408 | 408 | if branchmerge: |
|
409 | 409 | # We've done a branch merge, mark this file as merged |
|
410 | 410 | # so that we properly record the merger later |
|
411 | 411 | repo.dirstate.merge(fd) |
|
412 | 412 | if f != f2: # copy/rename |
|
413 | 413 | if move: |
|
414 | 414 | repo.dirstate.remove(f) |
|
415 | 415 | if f != fd: |
|
416 | 416 | repo.dirstate.copy(f, fd) |
|
417 | 417 | else: |
|
418 | 418 | repo.dirstate.copy(f2, fd) |
|
419 | 419 | else: |
|
420 | 420 | # We've update-merged a locally modified file, so |
|
421 | 421 | # we set the dirstate to emulate a normal checkout |
|
422 | 422 | # of that file some time in the past. Thus our |
|
423 | 423 | # merge will appear as a normal local file |
|
424 | 424 | # modification. |
|
425 | 425 | if f2 == fd: # file not locally copied/moved |
|
426 | 426 | repo.dirstate.normallookup(fd) |
|
427 | 427 | if move: |
|
428 | 428 | repo.dirstate.drop(f) |
|
429 | 429 | elif m == "d": # directory rename |
|
430 | 430 | f2, fd, flag = a[2:] |
|
431 | 431 | if not f2 and f not in repo.dirstate: |
|
432 | 432 | # untracked file moved |
|
433 | 433 | continue |
|
434 | 434 | if branchmerge: |
|
435 | 435 | repo.dirstate.add(fd) |
|
436 | 436 | if f: |
|
437 | 437 | repo.dirstate.remove(f) |
|
438 | 438 | repo.dirstate.copy(f, fd) |
|
439 | 439 | if f2: |
|
440 | 440 | repo.dirstate.copy(f2, fd) |
|
441 | 441 | else: |
|
442 | 442 | repo.dirstate.normal(fd) |
|
443 | 443 | if f: |
|
444 | 444 | repo.dirstate.drop(f) |
|
445 | 445 | |
|
446 | 446 | def update(repo, node, branchmerge, force, partial, ancestor=None): |
|
447 | 447 | """ |
|
448 | 448 | Perform a merge between the working directory and the given node |
|
449 | 449 | |
|
450 | 450 | node = the node to update to, or None if unspecified |
|
451 | 451 | branchmerge = whether to merge between branches |
|
452 | 452 | force = whether to force branch merging or file overwriting |
|
453 | 453 | partial = a function to filter file lists (dirstate not updated) |
|
454 | 454 | |
|
455 | 455 | The table below shows all the behaviors of the update command |
|
456 | 456 | given the -c and -C or no options, whether the working directory |
|
457 | 457 | is dirty, whether a revision is specified, and the relationship of |
|
458 | 458 | the parent rev to the target rev (linear, on the same named |
|
459 | 459 | branch, or on another named branch). |
|
460 | 460 | |
|
461 | 461 | This logic is tested by test-update-branches.t. |
|
462 | 462 | |
|
463 | 463 | -c -C dirty rev | linear same cross |
|
464 | 464 | n n n n | ok (1) x |
|
465 | 465 | n n n y | ok ok ok |
|
466 | 466 | n n y * | merge (2) (2) |
|
467 | 467 | n y * * | --- discard --- |
|
468 | 468 | y n y * | --- (3) --- |
|
469 | 469 | y n n * | --- ok --- |
|
470 | 470 | y y * * | --- (4) --- |
|
471 | 471 | |
|
472 | 472 | x = can't happen |
|
473 | 473 | * = don't-care |
|
474 | 474 | 1 = abort: crosses branches (use 'hg merge' or 'hg update -c') |
|
475 | 475 | 2 = abort: crosses branches (use 'hg merge' to merge or |
|
476 | 476 | use 'hg update -C' to discard changes) |
|
477 | 477 | 3 = abort: uncommitted local changes |
|
478 | 478 | 4 = incompatible options (checked in commands.py) |
|
479 | 479 | |
|
480 | 480 | Return the same tuple as applyupdates(). |
|
481 | 481 | """ |
|
482 | 482 | |
|
483 | 483 | onode = node |
|
484 | 484 | wlock = repo.wlock() |
|
485 | 485 | try: |
|
486 | 486 | wc = repo[None] |
|
487 | 487 | if node is None: |
|
488 | 488 | # tip of current branch |
|
489 | 489 | try: |
|
490 | 490 | node = repo.branchtags()[wc.branch()] |
|
491 | 491 | except KeyError: |
|
492 | 492 | if wc.branch() == "default": # no default branch! |
|
493 | 493 | node = repo.lookup("tip") # update to tip |
|
494 | 494 | else: |
|
495 | 495 | raise util.Abort(_("branch %s not found") % wc.branch()) |
|
496 | 496 | overwrite = force and not branchmerge |
|
497 | 497 | pl = wc.parents() |
|
498 | 498 | p1, p2 = pl[0], repo[node] |
|
499 | 499 | if ancestor: |
|
500 | 500 | pa = repo[ancestor] |
|
501 | 501 | else: |
|
502 | 502 | pa = p1.ancestor(p2) |
|
503 | 503 | |
|
504 | 504 | fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2) |
|
505 | 505 | |
|
506 | 506 | ### check phase |
|
507 | 507 | if not overwrite and len(pl) > 1: |
|
508 | 508 | raise util.Abort(_("outstanding uncommitted merges")) |
|
509 | 509 | if branchmerge: |
|
510 | 510 | if pa == p2: |
|
511 | 511 | raise util.Abort(_("merging with a working directory ancestor" |
|
512 | 512 | " has no effect")) |
|
513 | 513 | elif pa == p1: |
|
514 | 514 | if p1.branch() == p2.branch(): |
|
515 | 515 | raise util.Abort(_("nothing to merge (use 'hg update'" |
|
516 | 516 | " or check 'hg heads')")) |
|
517 | 517 | if not force and (wc.files() or wc.deleted()): |
|
518 | 518 | raise util.Abort(_("outstanding uncommitted changes " |
|
519 | 519 | "(use 'hg status' to list changes)")) |
|
520 | 520 | for s in wc.substate: |
|
521 | 521 | if wc.sub(s).dirty(): |
|
522 | 522 | raise util.Abort(_("outstanding uncommitted changes in " |
|
523 | 523 | "subrepository '%s'") % s) |
|
524 | 524 | |
|
525 | 525 | elif not overwrite: |
|
526 | 526 | if pa == p1 or pa == p2: # linear |
|
527 | 527 | pass # all good |
|
528 | 528 | elif wc.dirty(missing=True): |
|
529 | 529 | raise util.Abort(_("crosses branches (merge branches or use" |
|
530 | 530 | " --clean to discard changes)")) |
|
531 | 531 | elif onode is None: |
|
532 | 532 | raise util.Abort(_("crosses branches (merge branches or update" |
|
533 | 533 | " --check to force update)")) |
|
534 | 534 | else: |
|
535 | 535 | # Allow jumping branches if clean and specific rev given |
|
536 | 536 | overwrite = True |
|
537 | 537 | |
|
538 | 538 | ### calculate phase |
|
539 | 539 | action = [] |
|
540 | 540 | wc.status(unknown=True) # prime cache |
|
541 | 541 | if not force: |
|
542 | 542 | _checkunknown(wc, p2) |
|
543 | 543 | if not util.checkcase(repo.path): |
|
544 | 544 | _checkcollision(p2) |
|
545 | 545 | action += _forgetremoved(wc, p2, branchmerge) |
|
546 | 546 | action += manifestmerge(repo, wc, p2, pa, overwrite, partial) |
|
547 | 547 | |
|
548 | 548 | ### apply phase |
|
549 | 549 | if not branchmerge: # just jump to the new rev |
|
550 | 550 | fp1, fp2, xp1, xp2 = fp2, nullid, xp2, '' |
|
551 | 551 | if not partial: |
|
552 | 552 | repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2) |
|
553 | 553 | |
|
554 | 554 | stats = applyupdates(repo, action, wc, p2, pa, overwrite) |
|
555 | 555 | |
|
556 | 556 | if not partial: |
|
557 | 557 | repo.dirstate.setparents(fp1, fp2) |
|
558 | 558 | recordupdates(repo, action, branchmerge) |
|
559 | 559 | if not branchmerge: |
|
560 | 560 | repo.dirstate.setbranch(p2.branch()) |
|
561 | 561 | finally: |
|
562 | 562 | wlock.release() |
|
563 | 563 | |
|
564 | 564 | if not partial: |
|
565 | 565 | repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3]) |
|
566 | 566 | return stats |
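The functional change in merge.py is confined to _checkcollision(), which now folds manifest entries with encoding.lower() instead of a bytewise lower(); update() runs the check only when util.checkcase(repo.path) is false, i.e. on case-insensitive filesystems. Below is a standalone sketch of that check; fold() is only a stand-in for encoding.lower(), and decoding with cp932 is an illustrative assumption, not what encoding.lower() actually does.

```python
# Standalone sketch of the _checkcollision() logic. fold() approximates an
# encoding-aware case fold; it is not Mercurial's encoding.lower().
def fold(name, fsencoding="cp932"):
    try:
        return name.decode(fsencoding).lower().encode(fsencoding)
    except UnicodeError:
        return name.lower()   # fall back to bytewise folding

def checkcollision(manifest):
    """Raise if two tracked names fold to the same key."""
    folded = {}
    for fn in manifest:
        key = fold(fn)
        if key in folded:
            raise ValueError("case-folding collision between %r and %r"
                             % (fn, folded[key]))
        folded[key] = fn

# The four cp932 names from the test fold to four distinct keys, so the
# final "hg update tip" succeeds; a bytewise fold would merge them in pairs
# and abort with a spurious collision.
checkcollision([b"\x83\x41", b"\x83\x5a", b"\x83\x61", b"\x83\x7a"])
```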
@@ -1,711 +1,711 | |||
|
1 | 1 | # scmutil.py - Mercurial core utility functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from i18n import _ |
|
9 | import util, error, osutil, revset, similar | |
|
9 | import util, error, osutil, revset, similar, encoding | |
|
10 | 10 | import match as matchmod |
|
11 | 11 | import os, errno, re, stat, sys, glob |
|
12 | 12 | |
|
13 | 13 | def checkfilename(f): |
|
14 | 14 | '''Check that the filename f is an acceptable filename for a tracked file''' |
|
15 | 15 | if '\r' in f or '\n' in f: |
|
16 | 16 | raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f) |
|
17 | 17 | |
|
18 | 18 | def checkportable(ui, f): |
|
19 | 19 | '''Check if filename f is portable and warn or abort depending on config''' |
|
20 | 20 | checkfilename(f) |
|
21 | 21 | abort, warn = checkportabilityalert(ui) |
|
22 | 22 | if abort or warn: |
|
23 | 23 | msg = util.checkwinfilename(f) |
|
24 | 24 | if msg: |
|
25 | 25 | msg = "%s: %r" % (msg, f) |
|
26 | 26 | if abort: |
|
27 | 27 | raise util.Abort(msg) |
|
28 | 28 | ui.warn(_("warning: %s\n") % msg) |
|
29 | 29 | |
|
30 | 30 | def checkportabilityalert(ui): |
|
31 | 31 | '''check if the user's config requests nothing, a warning, or abort for |
|
32 | 32 | non-portable filenames''' |
|
33 | 33 | val = ui.config('ui', 'portablefilenames', 'warn') |
|
34 | 34 | lval = val.lower() |
|
35 | 35 | bval = util.parsebool(val) |
|
36 | 36 | abort = os.name == 'nt' or lval == 'abort' |
|
37 | 37 | warn = bval or lval == 'warn' |
|
38 | 38 | if bval is None and not (warn or abort or lval == 'ignore'): |
|
39 | 39 | raise error.ConfigError( |
|
40 | 40 | _("ui.portablefilenames value is invalid ('%s')") % val) |
|
41 | 41 | return abort, warn |
|
42 | 42 | |
|
43 | 43 | class casecollisionauditor(object): |
|
44 | 44 | def __init__(self, ui, abort, existingiter): |
|
45 | 45 | self._ui = ui |
|
46 | 46 | self._abort = abort |
|
47 | 47 | self._map = {} |
|
48 | 48 | for f in existingiter: |
|
49 | self._map[f.lower()] = f | |
|
49 | self._map[encoding.lower(f)] = f | |
|
50 | 50 | |
|
51 | 51 | def __call__(self, f): |
|
52 | fl = f.lower() | |
|
52 | fl = encoding.lower(f) | |
|
53 | 53 | map = self._map |
|
54 | 54 | if fl in map and map[fl] != f: |
|
55 | 55 | msg = _('possible case-folding collision for %s') % f |
|
56 | 56 | if self._abort: |
|
57 | 57 | raise util.Abort(msg) |
|
58 | 58 | self._ui.warn(_("warning: %s\n") % msg) |
|
59 | 59 | map[fl] = f |
|
60 | 60 | |
|
61 | 61 | class pathauditor(object): |
|
62 | 62 | '''ensure that a filesystem path contains no banned components. |
|
63 | 63 | the following properties of a path are checked: |
|
64 | 64 | |
|
65 | 65 | - ends with a directory separator |
|
66 | 66 | - under top-level .hg |
|
67 | 67 | - starts at the root of a windows drive |
|
68 | 68 | - contains ".." |
|
69 | 69 | - traverses a symlink (e.g. a/symlink_here/b) |
|
70 | 70 | - inside a nested repository (a callback can be used to approve |
|
71 | 71 | some nested repositories, e.g., subrepositories) |
|
72 | 72 | ''' |
|
73 | 73 | |
|
74 | 74 | def __init__(self, root, callback=None): |
|
75 | 75 | self.audited = set() |
|
76 | 76 | self.auditeddir = set() |
|
77 | 77 | self.root = root |
|
78 | 78 | self.callback = callback |
|
79 | 79 | |
|
80 | 80 | def __call__(self, path): |
|
81 | 81 | '''Check the relative path. |
|
82 | 82 | path may contain a pattern (e.g. foodir/**.txt)''' |
|
83 | 83 | |
|
84 | 84 | if path in self.audited: |
|
85 | 85 | return |
|
86 | 86 | # AIX ignores "/" at end of path, others raise EISDIR. |
|
87 | 87 | if util.endswithsep(path): |
|
88 | 88 | raise util.Abort(_("path ends in directory separator: %s") % path) |
|
89 | 89 | normpath = os.path.normcase(path) |
|
90 | 90 | parts = util.splitpath(normpath) |
|
91 | 91 | if (os.path.splitdrive(path)[0] |
|
92 | 92 | or parts[0].lower() in ('.hg', '.hg.', '') |
|
93 | 93 | or os.pardir in parts): |
|
94 | 94 | raise util.Abort(_("path contains illegal component: %s") % path) |
|
95 | 95 | if '.hg' in path.lower(): |
|
96 | 96 | lparts = [p.lower() for p in parts] |
|
97 | 97 | for p in '.hg', '.hg.': |
|
98 | 98 | if p in lparts[1:]: |
|
99 | 99 | pos = lparts.index(p) |
|
100 | 100 | base = os.path.join(*parts[:pos]) |
|
101 | 101 | raise util.Abort(_('path %r is inside nested repo %r') |
|
102 | 102 | % (path, base)) |
|
103 | 103 | |
|
104 | 104 | parts.pop() |
|
105 | 105 | prefixes = [] |
|
106 | 106 | while parts: |
|
107 | 107 | prefix = os.sep.join(parts) |
|
108 | 108 | if prefix in self.auditeddir: |
|
109 | 109 | break |
|
110 | 110 | curpath = os.path.join(self.root, prefix) |
|
111 | 111 | try: |
|
112 | 112 | st = os.lstat(curpath) |
|
113 | 113 | except OSError, err: |
|
114 | 114 | # EINVAL can be raised as invalid path syntax under win32. |
|
115 | 115 | # They must be ignored for patterns can be checked too. |
|
116 | 116 | if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL): |
|
117 | 117 | raise |
|
118 | 118 | else: |
|
119 | 119 | if stat.S_ISLNK(st.st_mode): |
|
120 | 120 | raise util.Abort( |
|
121 | 121 | _('path %r traverses symbolic link %r') |
|
122 | 122 | % (path, prefix)) |
|
123 | 123 | elif (stat.S_ISDIR(st.st_mode) and |
|
124 | 124 | os.path.isdir(os.path.join(curpath, '.hg'))): |
|
125 | 125 | if not self.callback or not self.callback(curpath): |
|
126 | 126 | raise util.Abort(_('path %r is inside nested repo %r') % |
|
127 | 127 | (path, prefix)) |
|
128 | 128 | prefixes.append(prefix) |
|
129 | 129 | parts.pop() |
|
130 | 130 | |
|
131 | 131 | self.audited.add(path) |
|
132 | 132 | # only add prefixes to the cache after checking everything: we don't |
|
133 | 133 | # want to add "foo/bar/baz" before checking if there's a "foo/.hg" |
|
134 | 134 | self.auditeddir.update(prefixes) |
|
135 | 135 | |
|
136 | 136 | class abstractopener(object): |
|
137 | 137 | """Abstract base class; cannot be instantiated""" |
|
138 | 138 | |
|
139 | 139 | def __init__(self, *args, **kwargs): |
|
140 | 140 | '''Prevent instantiation; don't call this from subclasses.''' |
|
141 | 141 | raise NotImplementedError('attempted instantiating ' + str(type(self))) |
|
142 | 142 | |
|
143 | 143 | def read(self, path): |
|
144 | 144 | fp = self(path, 'rb') |
|
145 | 145 | try: |
|
146 | 146 | return fp.read() |
|
147 | 147 | finally: |
|
148 | 148 | fp.close() |
|
149 | 149 | |
|
150 | 150 | def write(self, path, data): |
|
151 | 151 | fp = self(path, 'wb') |
|
152 | 152 | try: |
|
153 | 153 | return fp.write(data) |
|
154 | 154 | finally: |
|
155 | 155 | fp.close() |
|
156 | 156 | |
|
157 | 157 | def append(self, path, data): |
|
158 | 158 | fp = self(path, 'ab') |
|
159 | 159 | try: |
|
160 | 160 | return fp.write(data) |
|
161 | 161 | finally: |
|
162 | 162 | fp.close() |
|
163 | 163 | |
|
164 | 164 | class opener(abstractopener): |
|
165 | 165 | '''Open files relative to a base directory |
|
166 | 166 | |
|
167 | 167 | This class is used to hide the details of COW semantics and |
|
168 | 168 | remote file access from higher level code. |
|
169 | 169 | ''' |
|
170 | 170 | def __init__(self, base, audit=True): |
|
171 | 171 | self.base = base |
|
172 | 172 | self._audit = audit |
|
173 | 173 | if audit: |
|
174 | 174 | self.auditor = pathauditor(base) |
|
175 | 175 | else: |
|
176 | 176 | self.auditor = util.always |
|
177 | 177 | self.createmode = None |
|
178 | 178 | self._trustnlink = None |
|
179 | 179 | |
|
180 | 180 | @util.propertycache |
|
181 | 181 | def _cansymlink(self): |
|
182 | 182 | return util.checklink(self.base) |
|
183 | 183 | |
|
184 | 184 | def _fixfilemode(self, name): |
|
185 | 185 | if self.createmode is None: |
|
186 | 186 | return |
|
187 | 187 | os.chmod(name, self.createmode & 0666) |
|
188 | 188 | |
|
189 | 189 | def __call__(self, path, mode="r", text=False, atomictemp=False): |
|
190 | 190 | if self._audit: |
|
191 | 191 | r = util.checkosfilename(path) |
|
192 | 192 | if r: |
|
193 | 193 | raise util.Abort("%s: %r" % (r, path)) |
|
194 | 194 | self.auditor(path) |
|
195 | 195 | f = os.path.join(self.base, path) |
|
196 | 196 | |
|
197 | 197 | if not text and "b" not in mode: |
|
198 | 198 | mode += "b" # for that other OS |
|
199 | 199 | |
|
200 | 200 | nlink = -1 |
|
201 | 201 | dirname, basename = os.path.split(f) |
|
202 | 202 | # If basename is empty, then the path is malformed because it points |
|
203 | 203 | # to a directory. Let the posixfile() call below raise IOError. |
|
204 | 204 | if basename and mode not in ('r', 'rb'): |
|
205 | 205 | if atomictemp: |
|
206 | 206 | if not os.path.isdir(dirname): |
|
207 | 207 | util.makedirs(dirname, self.createmode) |
|
208 | 208 | return util.atomictempfile(f, mode, self.createmode) |
|
209 | 209 | try: |
|
210 | 210 | if 'w' in mode: |
|
211 | 211 | util.unlink(f) |
|
212 | 212 | nlink = 0 |
|
213 | 213 | else: |
|
214 | 214 | # nlinks() may behave differently for files on Windows |
|
215 | 215 | # shares if the file is open. |
|
216 | 216 | fd = util.posixfile(f) |
|
217 | 217 | nlink = util.nlinks(f) |
|
218 | 218 | if nlink < 1: |
|
219 | 219 | nlink = 2 # force mktempcopy (issue1922) |
|
220 | 220 | fd.close() |
|
221 | 221 | except (OSError, IOError), e: |
|
222 | 222 | if e.errno != errno.ENOENT: |
|
223 | 223 | raise |
|
224 | 224 | nlink = 0 |
|
225 | 225 | if not os.path.isdir(dirname): |
|
226 | 226 | util.makedirs(dirname, self.createmode) |
|
227 | 227 | if nlink > 0: |
|
228 | 228 | if self._trustnlink is None: |
|
229 | 229 | self._trustnlink = nlink > 1 or util.checknlink(f) |
|
230 | 230 | if nlink > 1 or not self._trustnlink: |
|
231 | 231 | util.rename(util.mktempcopy(f), f) |
|
232 | 232 | fp = util.posixfile(f, mode) |
|
233 | 233 | if nlink == 0: |
|
234 | 234 | self._fixfilemode(f) |
|
235 | 235 | return fp |
|
236 | 236 | |
|
237 | 237 | def symlink(self, src, dst): |
|
238 | 238 | self.auditor(dst) |
|
239 | 239 | linkname = os.path.join(self.base, dst) |
|
240 | 240 | try: |
|
241 | 241 | os.unlink(linkname) |
|
242 | 242 | except OSError: |
|
243 | 243 | pass |
|
244 | 244 | |
|
245 | 245 | dirname = os.path.dirname(linkname) |
|
246 | 246 | if not os.path.exists(dirname): |
|
247 | 247 | util.makedirs(dirname, self.createmode) |
|
248 | 248 | |
|
249 | 249 | if self._cansymlink: |
|
250 | 250 | try: |
|
251 | 251 | os.symlink(src, linkname) |
|
252 | 252 | except OSError, err: |
|
253 | 253 | raise OSError(err.errno, _('could not symlink to %r: %s') % |
|
254 | 254 | (src, err.strerror), linkname) |
|
255 | 255 | else: |
|
256 | 256 | f = self(dst, "w") |
|
257 | 257 | f.write(src) |
|
258 | 258 | f.close() |
|
259 | 259 | self._fixfilemode(dst) |
|
260 | 260 | |
|
261 | 261 | def audit(self, path): |
|
262 | 262 | self.auditor(path) |
|
263 | 263 | |
|
264 | 264 | class filteropener(abstractopener): |
|
265 | 265 | '''Wrapper opener for filtering filenames with a function.''' |
|
266 | 266 | |
|
267 | 267 | def __init__(self, opener, filter): |
|
268 | 268 | self._filter = filter |
|
269 | 269 | self._orig = opener |
|
270 | 270 | |
|
271 | 271 | def __call__(self, path, *args, **kwargs): |
|
272 | 272 | return self._orig(self._filter(path), *args, **kwargs) |
|
273 | 273 | |
|
274 | 274 | def canonpath(root, cwd, myname, auditor=None): |
|
275 | 275 | '''return the canonical path of myname, given cwd and root''' |
|
276 | 276 | if util.endswithsep(root): |
|
277 | 277 | rootsep = root |
|
278 | 278 | else: |
|
279 | 279 | rootsep = root + os.sep |
|
280 | 280 | name = myname |
|
281 | 281 | if not os.path.isabs(name): |
|
282 | 282 | name = os.path.join(root, cwd, name) |
|
283 | 283 | name = os.path.normpath(name) |
|
284 | 284 | if auditor is None: |
|
285 | 285 | auditor = pathauditor(root) |
|
286 | 286 | if name != rootsep and name.startswith(rootsep): |
|
287 | 287 | name = name[len(rootsep):] |
|
288 | 288 | auditor(name) |
|
289 | 289 | return util.pconvert(name) |
|
290 | 290 | elif name == root: |
|
291 | 291 | return '' |
|
292 | 292 | else: |
|
293 | 293 | # Determine whether `name' is in the hierarchy at or beneath `root', |
|
294 | 294 | # by iterating name=dirname(name) until that causes no change (can't |
|
295 | 295 | # check name == '/', because that doesn't work on windows). For each |
|
296 | 296 | # `name', compare dev/inode numbers. If they match, the list `rel' |
|
297 | 297 | # holds the reversed list of components making up the relative file |
|
298 | 298 | # name we want. |
|
299 | 299 | root_st = os.stat(root) |
|
300 | 300 | rel = [] |
|
301 | 301 | while True: |
|
302 | 302 | try: |
|
303 | 303 | name_st = os.stat(name) |
|
304 | 304 | except OSError: |
|
305 | 305 | break |
|
306 | 306 | if util.samestat(name_st, root_st): |
|
307 | 307 | if not rel: |
|
308 | 308 | # name was actually the same as root (maybe a symlink) |
|
309 | 309 | return '' |
|
310 | 310 | rel.reverse() |
|
311 | 311 | name = os.path.join(*rel) |
|
312 | 312 | auditor(name) |
|
313 | 313 | return util.pconvert(name) |
|
314 | 314 | dirname, basename = os.path.split(name) |
|
315 | 315 | rel.append(basename) |
|
316 | 316 | if dirname == name: |
|
317 | 317 | break |
|
318 | 318 | name = dirname |
|
319 | 319 | |
|
320 | 320 | raise util.Abort('%s not under root' % myname) |
|
321 | 321 | |
|
322 | 322 | def walkrepos(path, followsym=False, seen_dirs=None, recurse=False): |
|
323 | 323 | '''yield every hg repository under path, recursively.''' |
|
324 | 324 | def errhandler(err): |
|
325 | 325 | if err.filename == path: |
|
326 | 326 | raise err |
|
327 | 327 | if followsym and hasattr(os.path, 'samestat'): |
|
328 | 328 | def adddir(dirlst, dirname): |
|
329 | 329 | match = False |
|
330 | 330 | samestat = os.path.samestat |
|
331 | 331 | dirstat = os.stat(dirname) |
|
332 | 332 | for lstdirstat in dirlst: |
|
333 | 333 | if samestat(dirstat, lstdirstat): |
|
334 | 334 | match = True |
|
335 | 335 | break |
|
336 | 336 | if not match: |
|
337 | 337 | dirlst.append(dirstat) |
|
338 | 338 | return not match |
|
339 | 339 | else: |
|
340 | 340 | followsym = False |
|
341 | 341 | |
|
342 | 342 | if (seen_dirs is None) and followsym: |
|
343 | 343 | seen_dirs = [] |
|
344 | 344 | adddir(seen_dirs, path) |
|
345 | 345 | for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler): |
|
346 | 346 | dirs.sort() |
|
347 | 347 | if '.hg' in dirs: |
|
348 | 348 | yield root # found a repository |
|
349 | 349 | qroot = os.path.join(root, '.hg', 'patches') |
|
350 | 350 | if os.path.isdir(os.path.join(qroot, '.hg')): |
|
351 | 351 | yield qroot # we have a patch queue repo here |
|
352 | 352 | if recurse: |
|
353 | 353 | # avoid recursing inside the .hg directory |
|
354 | 354 | dirs.remove('.hg') |
|
355 | 355 | else: |
|
356 | 356 | dirs[:] = [] # don't descend further |
|
357 | 357 | elif followsym: |
|
358 | 358 | newdirs = [] |
|
359 | 359 | for d in dirs: |
|
360 | 360 | fname = os.path.join(root, d) |
|
361 | 361 | if adddir(seen_dirs, fname): |
|
362 | 362 | if os.path.islink(fname): |
|
363 | 363 | for hgname in walkrepos(fname, True, seen_dirs): |
|
364 | 364 | yield hgname |
|
365 | 365 | else: |
|
366 | 366 | newdirs.append(d) |
|
367 | 367 | dirs[:] = newdirs |
|
368 | 368 | |
|
369 | 369 | def osrcpath(): |
|
370 | 370 | '''return default os-specific hgrc search path''' |
|
371 | 371 | path = systemrcpath() |
|
372 | 372 | path.extend(userrcpath()) |
|
373 | 373 | path = [os.path.normpath(f) for f in path] |
|
374 | 374 | return path |
|
375 | 375 | |
|
376 | 376 | _rcpath = None |
|
377 | 377 | |
|
378 | 378 | def rcpath(): |
|
379 | 379 | '''return hgrc search path. if env var HGRCPATH is set, use it. |
|
380 | 380 | for each item in path, if directory, use files ending in .rc, |
|
381 | 381 | else use item. |
|
382 | 382 | make HGRCPATH empty to only look in .hg/hgrc of current repo. |
|
383 | 383 | if no HGRCPATH, use default os-specific path.''' |
|
384 | 384 | global _rcpath |
|
385 | 385 | if _rcpath is None: |
|
386 | 386 | if 'HGRCPATH' in os.environ: |
|
387 | 387 | _rcpath = [] |
|
388 | 388 | for p in os.environ['HGRCPATH'].split(os.pathsep): |
|
389 | 389 | if not p: |
|
390 | 390 | continue |
|
391 | 391 | p = util.expandpath(p) |
|
392 | 392 | if os.path.isdir(p): |
|
393 | 393 | for f, kind in osutil.listdir(p): |
|
394 | 394 | if f.endswith('.rc'): |
|
395 | 395 | _rcpath.append(os.path.join(p, f)) |
|
396 | 396 | else: |
|
397 | 397 | _rcpath.append(p) |
|
398 | 398 | else: |
|
399 | 399 | _rcpath = osrcpath() |
|
400 | 400 | return _rcpath |
|
401 | 401 | |
|
402 | 402 | if os.name != 'nt': |
|
403 | 403 | |
|
404 | 404 | def rcfiles(path): |
|
405 | 405 | rcs = [os.path.join(path, 'hgrc')] |
|
406 | 406 | rcdir = os.path.join(path, 'hgrc.d') |
|
407 | 407 | try: |
|
408 | 408 | rcs.extend([os.path.join(rcdir, f) |
|
409 | 409 | for f, kind in osutil.listdir(rcdir) |
|
410 | 410 | if f.endswith(".rc")]) |
|
411 | 411 | except OSError: |
|
412 | 412 | pass |
|
413 | 413 | return rcs |
|
414 | 414 | |
|
415 | 415 | def systemrcpath(): |
|
416 | 416 | path = [] |
|
417 | 417 | # old mod_python does not set sys.argv |
|
418 | 418 | if len(getattr(sys, 'argv', [])) > 0: |
|
419 | 419 | p = os.path.dirname(os.path.dirname(sys.argv[0])) |
|
420 | 420 | path.extend(rcfiles(os.path.join(p, 'etc/mercurial'))) |
|
421 | 421 | path.extend(rcfiles('/etc/mercurial')) |
|
422 | 422 | return path |
|
423 | 423 | |
|
424 | 424 | def userrcpath(): |
|
425 | 425 | return [os.path.expanduser('~/.hgrc')] |
|
426 | 426 | |
|
427 | 427 | else: |
|
428 | 428 | |
|
429 | 429 | _HKEY_LOCAL_MACHINE = 0x80000002L |
|
430 | 430 | |
|
431 | 431 | def systemrcpath(): |
|
432 | 432 | '''return default os-specific hgrc search path''' |
|
433 | 433 | rcpath = [] |
|
434 | 434 | filename = util.executablepath() |
|
435 | 435 | # Use mercurial.ini found in directory with hg.exe |
|
436 | 436 | progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini') |
|
437 | 437 | if os.path.isfile(progrc): |
|
438 | 438 | rcpath.append(progrc) |
|
439 | 439 | return rcpath |
|
440 | 440 | # Use hgrc.d found in directory with hg.exe |
|
441 | 441 | progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d') |
|
442 | 442 | if os.path.isdir(progrcd): |
|
443 | 443 | for f, kind in osutil.listdir(progrcd): |
|
444 | 444 | if f.endswith('.rc'): |
|
445 | 445 | rcpath.append(os.path.join(progrcd, f)) |
|
446 | 446 | return rcpath |
|
447 | 447 | # else look for a system rcpath in the registry |
|
448 | 448 | value = util.lookupreg('SOFTWARE\\Mercurial', None, |
|
449 | 449 | _HKEY_LOCAL_MACHINE) |
|
450 | 450 | if not isinstance(value, str) or not value: |
|
451 | 451 | return rcpath |
|
452 | 452 | value = value.replace('/', os.sep) |
|
453 | 453 | for p in value.split(os.pathsep): |
|
454 | 454 | if p.lower().endswith('mercurial.ini'): |
|
455 | 455 | rcpath.append(p) |
|
456 | 456 | elif os.path.isdir(p): |
|
457 | 457 | for f, kind in osutil.listdir(p): |
|
458 | 458 | if f.endswith('.rc'): |
|
459 | 459 | rcpath.append(os.path.join(p, f)) |
|
460 | 460 | return rcpath |
|
461 | 461 | |
|
462 | 462 | def userrcpath(): |
|
463 | 463 | '''return os-specific hgrc search path to the user dir''' |
|
464 | 464 | home = os.path.expanduser('~') |
|
465 | 465 | path = [os.path.join(home, 'mercurial.ini'), |
|
466 | 466 | os.path.join(home, '.hgrc')] |
|
467 | 467 | userprofile = os.environ.get('USERPROFILE') |
|
468 | 468 | if userprofile: |
|
469 | 469 | path.append(os.path.join(userprofile, 'mercurial.ini')) |
|
470 | 470 | path.append(os.path.join(userprofile, '.hgrc')) |
|
471 | 471 | return path |
|
472 | 472 | |
|
473 | 473 | def revsingle(repo, revspec, default='.'): |
|
474 | 474 | if not revspec: |
|
475 | 475 | return repo[default] |
|
476 | 476 | |
|
477 | 477 | l = revrange(repo, [revspec]) |
|
478 | 478 | if len(l) < 1: |
|
479 | 479 | raise util.Abort(_('empty revision set')) |
|
480 | 480 | return repo[l[-1]] |
|
481 | 481 | |
|
482 | 482 | def revpair(repo, revs): |
|
483 | 483 | if not revs: |
|
484 | 484 | return repo.dirstate.p1(), None |
|
485 | 485 | |
|
486 | 486 | l = revrange(repo, revs) |
|
487 | 487 | |
|
488 | 488 | if len(l) == 0: |
|
489 | 489 | return repo.dirstate.p1(), None |
|
490 | 490 | |
|
491 | 491 | if len(l) == 1: |
|
492 | 492 | return repo.lookup(l[0]), None |
|
493 | 493 | |
|
494 | 494 | return repo.lookup(l[0]), repo.lookup(l[-1]) |
|
495 | 495 | |
|
496 | 496 | _revrangesep = ':' |
|
497 | 497 | |
|
498 | 498 | def revrange(repo, revs): |
|
499 | 499 | """Yield revision as strings from a list of revision specifications.""" |
|
500 | 500 | |
|
501 | 501 | def revfix(repo, val, defval): |
|
502 | 502 | if not val and val != 0 and defval is not None: |
|
503 | 503 | return defval |
|
504 | 504 | return repo.changelog.rev(repo.lookup(val)) |
|
505 | 505 | |
|
506 | 506 | seen, l = set(), [] |
|
507 | 507 | for spec in revs: |
|
508 | 508 | # attempt to parse old-style ranges first to deal with |
|
509 | 509 | # things like old-tag which contain query metacharacters |
|
510 | 510 | try: |
|
511 | 511 | if isinstance(spec, int): |
|
512 | 512 | seen.add(spec) |
|
513 | 513 | l.append(spec) |
|
514 | 514 | continue |
|
515 | 515 | |
|
516 | 516 | if _revrangesep in spec: |
|
517 | 517 | start, end = spec.split(_revrangesep, 1) |
|
518 | 518 | start = revfix(repo, start, 0) |
|
519 | 519 | end = revfix(repo, end, len(repo) - 1) |
|
520 | 520 | step = start > end and -1 or 1 |
|
521 | 521 | for rev in xrange(start, end + step, step): |
|
522 | 522 | if rev in seen: |
|
523 | 523 | continue |
|
524 | 524 | seen.add(rev) |
|
525 | 525 | l.append(rev) |
|
526 | 526 | continue |
|
527 | 527 | elif spec and spec in repo: # single unquoted rev |
|
528 | 528 | rev = revfix(repo, spec, None) |
|
529 | 529 | if rev in seen: |
|
530 | 530 | continue |
|
531 | 531 | seen.add(rev) |
|
532 | 532 | l.append(rev) |
|
533 | 533 | continue |
|
534 | 534 | except error.RepoLookupError: |
|
535 | 535 | pass |
|
536 | 536 | |
|
537 | 537 | # fall through to new-style queries if old-style fails |
|
538 | 538 | m = revset.match(repo.ui, spec) |
|
539 | 539 | for r in m(repo, range(len(repo))): |
|
540 | 540 | if r not in seen: |
|
541 | 541 | l.append(r) |
|
542 | 542 | seen.update(l) |
|
543 | 543 | |
|
544 | 544 | return l |
|
545 | 545 | |
|
546 | 546 | def expandpats(pats): |
|
547 | 547 | if not util.expandglobs: |
|
548 | 548 | return list(pats) |
|
549 | 549 | ret = [] |
|
550 | 550 | for p in pats: |
|
551 | 551 | kind, name = matchmod._patsplit(p, None) |
|
552 | 552 | if kind is None: |
|
553 | 553 | try: |
|
554 | 554 | globbed = glob.glob(name) |
|
555 | 555 | except re.error: |
|
556 | 556 | globbed = [name] |
|
557 | 557 | if globbed: |
|
558 | 558 | ret.extend(globbed) |
|
559 | 559 | continue |
|
560 | 560 | ret.append(p) |
|
561 | 561 | return ret |
|
562 | 562 | |
|
563 | 563 | def match(ctx, pats=[], opts={}, globbed=False, default='relpath'): |
|
564 | 564 | if pats == ("",): |
|
565 | 565 | pats = [] |
|
566 | 566 | if not globbed and default == 'relpath': |
|
567 | 567 | pats = expandpats(pats or []) |
|
568 | 568 | |
|
569 | 569 | m = ctx.match(pats, opts.get('include'), opts.get('exclude'), |
|
570 | 570 | default) |
|
571 | 571 | def badfn(f, msg): |
|
572 | 572 | ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg)) |
|
573 | 573 | m.bad = badfn |
|
574 | 574 | return m |
|
575 | 575 | |
|
576 | 576 | def matchall(repo): |
|
577 | 577 | return matchmod.always(repo.root, repo.getcwd()) |
|
578 | 578 | |
|
579 | 579 | def matchfiles(repo, files): |
|
580 | 580 | return matchmod.exact(repo.root, repo.getcwd(), files) |
|
581 | 581 | |
|
582 | 582 | def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None): |
|
583 | 583 | if dry_run is None: |
|
584 | 584 | dry_run = opts.get('dry_run') |
|
585 | 585 | if similarity is None: |
|
586 | 586 | similarity = float(opts.get('similarity') or 0) |
|
587 | 587 | # we'd use status here, except handling of symlinks and ignore is tricky |
|
588 | 588 | added, unknown, deleted, removed = [], [], [], [] |
|
589 | 589 | audit_path = pathauditor(repo.root) |
|
590 | 590 | m = match(repo[None], pats, opts) |
|
591 | 591 | for abs in repo.walk(m): |
|
592 | 592 | target = repo.wjoin(abs) |
|
593 | 593 | good = True |
|
594 | 594 | try: |
|
595 | 595 | audit_path(abs) |
|
596 | 596 | except (OSError, util.Abort): |
|
597 | 597 | good = False |
|
598 | 598 | rel = m.rel(abs) |
|
599 | 599 | exact = m.exact(abs) |
|
600 | 600 | if good and abs not in repo.dirstate: |
|
601 | 601 | unknown.append(abs) |
|
602 | 602 | if repo.ui.verbose or not exact: |
|
603 | 603 | repo.ui.status(_('adding %s\n') % ((pats and rel) or abs)) |
|
604 | 604 | elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target) |
|
605 | 605 | or (os.path.isdir(target) and not os.path.islink(target))): |
|
606 | 606 | deleted.append(abs) |
|
607 | 607 | if repo.ui.verbose or not exact: |
|
608 | 608 | repo.ui.status(_('removing %s\n') % ((pats and rel) or abs)) |
|
609 | 609 | # for finding renames |
|
610 | 610 | elif repo.dirstate[abs] == 'r': |
|
611 | 611 | removed.append(abs) |
|
612 | 612 | elif repo.dirstate[abs] == 'a': |
|
613 | 613 | added.append(abs) |
|
614 | 614 | copies = {} |
|
615 | 615 | if similarity > 0: |
|
616 | 616 | for old, new, score in similar.findrenames(repo, |
|
617 | 617 | added + unknown, removed + deleted, similarity): |
|
618 | 618 | if repo.ui.verbose or not m.exact(old) or not m.exact(new): |
|
619 | 619 | repo.ui.status(_('recording removal of %s as rename to %s ' |
|
620 | 620 | '(%d%% similar)\n') % |
|
621 | 621 | (m.rel(old), m.rel(new), score * 100)) |
|
622 | 622 | copies[new] = old |
|
623 | 623 | |
|
624 | 624 | if not dry_run: |
|
625 | 625 | wctx = repo[None] |
|
626 | 626 | wlock = repo.wlock() |
|
627 | 627 | try: |
|
628 | 628 | wctx.forget(deleted) |
|
629 | 629 | wctx.add(unknown) |
|
630 | 630 | for new, old in copies.iteritems(): |
|
631 | 631 | wctx.copy(old, new) |
|
632 | 632 | finally: |
|
633 | 633 | wlock.release() |
|
634 | 634 | |
|
635 | 635 | def updatedir(ui, repo, patches, similarity=0): |
|
636 | 636 | '''Update dirstate after patch application according to metadata''' |
|
637 | 637 | if not patches: |
|
638 | 638 | return [] |
|
639 | 639 | copies = [] |
|
640 | 640 | removes = set() |
|
641 | 641 | cfiles = patches.keys() |
|
642 | 642 | cwd = repo.getcwd() |
|
643 | 643 | if cwd: |
|
644 | 644 | cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()] |
|
645 | 645 | for f in patches: |
|
646 | 646 | gp = patches[f] |
|
647 | 647 | if not gp: |
|
648 | 648 | continue |
|
649 | 649 | if gp.op == 'RENAME': |
|
650 | 650 | copies.append((gp.oldpath, gp.path)) |
|
651 | 651 | removes.add(gp.oldpath) |
|
652 | 652 | elif gp.op == 'COPY': |
|
653 | 653 | copies.append((gp.oldpath, gp.path)) |
|
654 | 654 | elif gp.op == 'DELETE': |
|
655 | 655 | removes.add(gp.path) |
|
656 | 656 | |
|
657 | 657 | wctx = repo[None] |
|
658 | 658 | for src, dst in copies: |
|
659 | 659 | dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd) |
|
660 | 660 | if (not similarity) and removes: |
|
661 | 661 | wctx.remove(sorted(removes), True) |
|
662 | 662 | |
|
663 | 663 | for f in patches: |
|
664 | 664 | gp = patches[f] |
|
665 | 665 | if gp and gp.mode: |
|
666 | 666 | islink, isexec = gp.mode |
|
667 | 667 | dst = repo.wjoin(gp.path) |
|
668 | 668 | # patch won't create empty files |
|
669 | 669 | if gp.op == 'ADD' and not os.path.lexists(dst): |
|
670 | 670 | flags = (isexec and 'x' or '') + (islink and 'l' or '') |
|
671 | 671 | repo.wwrite(gp.path, '', flags) |
|
672 | 672 | util.setflags(dst, islink, isexec) |
|
673 | 673 | addremove(repo, cfiles, similarity=similarity) |
|
674 | 674 | files = patches.keys() |
|
675 | 675 | files.extend([r for r in removes if r not in files]) |
|
676 | 676 | return sorted(files) |
|
677 | 677 | |
|
678 | 678 | def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None): |
|
679 | 679 | """Update the dirstate to reflect the intent of copying src to dst. For |
|
680 | 680 | different reasons it might not end with dst being marked as copied from src. |
|
681 | 681 | """ |
|
682 | 682 | origsrc = repo.dirstate.copied(src) or src |
|
683 | 683 | if dst == origsrc: # copying back a copy? |
|
684 | 684 | if repo.dirstate[dst] not in 'mn' and not dryrun: |
|
685 | 685 | repo.dirstate.normallookup(dst) |
|
686 | 686 | else: |
|
687 | 687 | if repo.dirstate[origsrc] == 'a' and origsrc == src: |
|
688 | 688 | if not ui.quiet: |
|
689 | 689 | ui.warn(_("%s has not been committed yet, so no copy " |
|
690 | 690 | "data will be stored for %s.\n") |
|
691 | 691 | % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))) |
|
692 | 692 | if repo.dirstate[dst] in '?r' and not dryrun: |
|
693 | 693 | wctx.add([dst]) |
|
694 | 694 | elif not dryrun: |
|
695 | 695 | wctx.copy(origsrc, dst) |
|
696 | 696 | |
|
697 | 697 | def readrequires(opener, supported): |
|
698 | 698 | '''Reads and parses .hg/requires and checks if all entries found |
|
699 | 699 | are in the list of supported features.''' |
|
700 | 700 | requirements = set(opener.read("requires").splitlines()) |
|
701 | 701 | missings = [] |
|
702 | 702 | for r in requirements: |
|
703 | 703 | if r not in supported: |
|
704 | 704 | if not r or not r[0].isalnum(): |
|
705 | 705 | raise error.RequirementError(_(".hg/requires file is corrupt")) |
|
706 | 706 | missings.append(r) |
|
707 | 707 | missings.sort() |
|
708 | 708 | if missings: |
|
709 | 709 | raise error.RequirementError(_("unknown repository format: " |
|
710 | 710 | "requires features '%s' (upgrade Mercurial)") % "', '".join(missings)) |
|
711 | 711 | return requirements |
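In scmutil.py the same substitution lands in casecollisionauditor, which is behind the ui.portablefilenames collision check that the "hg add" half of the test exercises. The sketch below is a rough, hypothetical re-creation of that pattern (not Mercurial's API): remember the folded form of every known name, then flag a new name whose fold matches an existing entry but whose bytes differ.

```python
# Rough, hypothetical re-creation of the casecollisionauditor pattern;
# not Mercurial's API. fold defaults to a bytewise lower() so the false
# collision is easy to demonstrate.
class CaseCollisionAuditor(object):
    def __init__(self, existing, abort=True, fold=lambda s: s.lower()):
        self._abort = abort
        self._fold = fold                       # stand-in for encoding.lower()
        self._map = dict((fold(f), f) for f in existing)

    def __call__(self, f):
        fl = self._fold(f)
        if fl in self._map and self._map[fl] != f:
            msg = "possible case-folding collision for %r" % (f,)
            if self._abort:
                raise RuntimeError(msg)
            print("warning: %s" % msg)
        self._map[fl] = f

# Bytewise folding reports a false collision between the two cp932 names;
# an encoding-aware fold keeps them apart, which is what lets all four
# "hg add" calls in the test succeed.
naive = CaseCollisionAuditor([b"\x83\x41"])
try:
    naive(b"\x83\x61")                          # false collision
except RuntimeError as err:
    print(err)

aware = CaseCollisionAuditor([b"\x83\x41"],
                             fold=lambda s: s.decode("cp932").lower())
aware(b"\x83\x61")                              # distinct names, no warning
```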