##// END OF EJS Templates
fix broken environment save/restore when a hook runs....
Vadim Gelfer -
r1880:05c7d75b default
parent child Browse files
Show More
@@ -1,177 +1,179 b''
#!/bin/sh
#
# hgmerge - default merge helper for Mercurial
#
# This tries to find a way to do three-way merge on the current system.
# The result ought to end up in $1.
#
# Usage: hgmerge LOCAL BASE OTHER

set -e # bail out quickly on failure

# NOTE(review): a stray debug line `echo $1 $2 $3` was removed here — it
# polluted stdout of a helper whose only contract is the merged file in $1.

LOCAL="$1"
BASE="$2"
OTHER="$3"

if [ -z "$EDITOR" ]; then
    EDITOR="vi"
fi

# find decent versions of our utilities, insisting on the GNU versions where we
# need to
MERGE="merge"
DIFF3="gdiff3"
DIFF="gdiff"
PATCH="gpatch"

# each tool falls back to its plain name, then to empty if unavailable
type "$MERGE" >/dev/null 2>&1 || MERGE=
type "$DIFF3" >/dev/null 2>&1 || DIFF3="diff3"
$DIFF3 --version >/dev/null 2>&1 || DIFF3=
type "$DIFF" >/dev/null 2>&1 || DIFF="diff"
type "$DIFF" >/dev/null 2>&1 || DIFF=
type "$PATCH" >/dev/null 2>&1 || PATCH="patch"
type "$PATCH" >/dev/null 2>&1 || PATCH=

# find optional visual utilities
FILEMERGE="/Developer/Applications/Utilities/FileMerge.app/Contents/MacOS/FileMerge"
KDIFF3="kdiff3"
TKDIFF="tkdiff"
MELD="meld"

type "$FILEMERGE" >/dev/null 2>&1 || FILEMERGE=
type "$KDIFF3" >/dev/null 2>&1 || KDIFF3=
type "$TKDIFF" >/dev/null 2>&1 || TKDIFF=
type "$MELD" >/dev/null 2>&1 || MELD=

# random part of names
RAND="$RANDOM$RANDOM"

# temporary directory for diff+patch merge
HGTMP="${TMPDIR-/tmp}/hgmerge.$RAND"

# backup file
BACKUP="$LOCAL.orig.$RAND"

# file used to test for file change
CHGTEST="$LOCAL.chg.$RAND"

# put all your required cleanup here
cleanup() {
    rm -f "$BACKUP" "$CHGTEST"
    rm -rf "$HGTMP"
}

# functions concerning program exit
success() {
    cleanup
    exit 0
}

# restore the original file from backup before giving up
failure() {
    echo "merge failed" 1>&2
    mv "$BACKUP" "$LOCAL"
    cleanup
    exit 1
}

# Ask if the merge was successful when the file's mtime did not change
ask_if_merged() {
    while true; do
        echo "$LOCAL seems unchanged."
        echo "Was the merge successful? [y/n]"
        read -r answer
        case "$answer" in
        y*|Y*) success;;
        n*|N*) failure;;
        esac
    done
}

# Clean up when interrupted
trap "failure" 1 2 3 6 15 # HUP INT QUIT ABRT TERM

# Back up our file (and try hard to keep the mtime unchanged)
mv "$LOCAL" "$BACKUP"
cp "$BACKUP" "$LOCAL"

# Attempt to do a non-interactive merge
if [ -n "$MERGE" -o -n "$DIFF3" ]; then
    if [ -n "$MERGE" ]; then
        $MERGE "$LOCAL" "$BASE" "$OTHER" 2> /dev/null && success
    elif [ -n "$DIFF3" ]; then
        $DIFF3 -m "$BACKUP" "$BASE" "$OTHER" > "$LOCAL" && success
    fi
    # exit status > 1 means real trouble, not just conflicts
    if [ $? -gt 1 ]; then
        echo "automatic merge failed! Exiting." 1>&2
        failure
    fi
fi

# on MacOS X try FileMerge.app, shipped with Apple's developer tools
if [ -n "$FILEMERGE" ]; then
    cp "$BACKUP" "$LOCAL"
    cp "$BACKUP" "$CHGTEST"
    # filemerge prefers the right by default
    $FILEMERGE -left "$OTHER" -right "$LOCAL" -ancestor "$BASE" -merge "$LOCAL"
    [ $? -ne 0 ] && echo "FileMerge failed to launch" && failure
    test "$LOCAL" -nt "$CHGTEST" && success || ask_if_merged
fi

if [ -n "$DISPLAY" ]; then
    # try using kdiff3, which is fairly nice
    if [ -n "$KDIFF3" ]; then
        $KDIFF3 --auto "$BASE" "$BACKUP" "$OTHER" -o "$LOCAL" || failure
        success
    fi

    # try using tkdiff, which is a bit less sophisticated
    if [ -n "$TKDIFF" ]; then
        $TKDIFF "$BACKUP" "$OTHER" -a "$BASE" -o "$LOCAL" || failure
        success
    fi

    if [ -n "$MELD" ]; then
        cp "$BACKUP" "$CHGTEST"
        # protect our feet - meld allows us to save to the left file
        cp "$BACKUP" "$LOCAL.tmp.$RAND"
        # Meld doesn't have automatic merging, so to reduce intervention
        # use the file with conflicts
        $MELD "$LOCAL.tmp.$RAND" "$LOCAL" "$OTHER" || failure
        # Also it doesn't return good error code
        test "$LOCAL" -nt "$CHGTEST" && success || ask_if_merged
    fi
fi

# Attempt to do a merge with $EDITOR
if [ -n "$MERGE" -o -n "$DIFF3" ]; then
    echo "conflicts detected in $LOCAL"
    cp "$BACKUP" "$CHGTEST"
    $EDITOR "$LOCAL" || failure
    # Some editors do not return meaningful error codes
    # Do not take any chances
    test "$LOCAL" -nt "$CHGTEST" && success || ask_if_merged
fi

# attempt to manually merge with diff and patch
if [ -n "$DIFF" -a -n "$PATCH" ]; then

    (umask 077 && mkdir "$HGTMP") || {
        echo "Could not create temporary directory $HGTMP" 1>&2
        failure
    }

    # diff exits non-zero when files differ; that is expected here
    $DIFF -u "$BASE" "$OTHER" > "$HGTMP/diff" || :
    if $PATCH "$LOCAL" < "$HGTMP/diff"; then
        success
    else
        # If rejects are empty after using the editor, merge was ok
        $EDITOR "$LOCAL" "$LOCAL.rej" || failure
        test -s "$LOCAL.rej" || success
    fi
    failure
fi

echo
echo "hgmerge: unable to find any merge utility!"
echo "supported programs:"
echo "merge, FileMerge, tkdiff, kdiff3, meld, diff+patch"
echo
failure
@@ -1,1907 +1,1887 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct, os, util
8 import struct, os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14
14
15 class localrepository(object):
15 class localrepository(object):
16 def __del__(self):
16 def __del__(self):
17 self.transhandle = None
17 self.transhandle = None
18 def __init__(self, parentui, path=None, create=0):
18 def __init__(self, parentui, path=None, create=0):
19 if not path:
19 if not path:
20 p = os.getcwd()
20 p = os.getcwd()
21 while not os.path.isdir(os.path.join(p, ".hg")):
21 while not os.path.isdir(os.path.join(p, ".hg")):
22 oldp = p
22 oldp = p
23 p = os.path.dirname(p)
23 p = os.path.dirname(p)
24 if p == oldp:
24 if p == oldp:
25 raise repo.RepoError(_("no repo found"))
25 raise repo.RepoError(_("no repo found"))
26 path = p
26 path = p
27 self.path = os.path.join(path, ".hg")
27 self.path = os.path.join(path, ".hg")
28
28
29 if not create and not os.path.isdir(self.path):
29 if not create and not os.path.isdir(self.path):
30 raise repo.RepoError(_("repository %s not found") % path)
30 raise repo.RepoError(_("repository %s not found") % path)
31
31
32 self.root = os.path.abspath(path)
32 self.root = os.path.abspath(path)
33 self.ui = ui.ui(parentui=parentui)
33 self.ui = ui.ui(parentui=parentui)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.manifest = manifest.manifest(self.opener)
36 self.manifest = manifest.manifest(self.opener)
37 self.changelog = changelog.changelog(self.opener)
37 self.changelog = changelog.changelog(self.opener)
38 self.tagscache = None
38 self.tagscache = None
39 self.nodetagscache = None
39 self.nodetagscache = None
40 self.encodepats = None
40 self.encodepats = None
41 self.decodepats = None
41 self.decodepats = None
42 self.transhandle = None
42 self.transhandle = None
43
43
44 if create:
44 if create:
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 os.mkdir(self.join("data"))
46 os.mkdir(self.join("data"))
47
47
48 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
48 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
49 try:
49 try:
50 self.ui.readconfig(self.join("hgrc"))
50 self.ui.readconfig(self.join("hgrc"))
51 except IOError:
51 except IOError:
52 pass
52 pass
53
53
54 def hook(self, name, throw=False, **args):
54 def hook(self, name, throw=False, **args):
55 def runhook(name, cmd):
55 def runhook(name, cmd):
56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
57 old = {}
57 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
58 for k, v in args.items():
58 r = util.esystem(cmd, environ=env, cwd=self.root)
59 k = k.upper()
60 old['HG_' + k] = os.environ.get(k, None)
61 old[k] = os.environ.get(k, None)
62 os.environ['HG_' + k] = str(v)
63 os.environ[k] = str(v)
64
65 try:
66 # Hooks run in the repository root
67 olddir = os.getcwd()
68 os.chdir(self.root)
69 r = os.system(cmd)
70 finally:
71 for k, v in old.items():
72 if v is not None:
73 os.environ[k] = v
74 else:
75 del os.environ[k]
76
77 os.chdir(olddir)
78
79 if r:
59 if r:
80 desc, r = util.explain_exit(r)
60 desc, r = util.explain_exit(r)
81 if throw:
61 if throw:
82 raise util.Abort(_('%s hook %s') % (name, desc))
62 raise util.Abort(_('%s hook %s') % (name, desc))
83 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
63 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
84 return False
64 return False
85 return True
65 return True
86
66
87 r = True
67 r = True
88 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
68 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
89 if hname.split(".", 1)[0] == name and cmd]
69 if hname.split(".", 1)[0] == name and cmd]
90 hooks.sort()
70 hooks.sort()
91 for hname, cmd in hooks:
71 for hname, cmd in hooks:
92 r = runhook(hname, cmd) and r
72 r = runhook(hname, cmd) and r
93 return r
73 return r
94
74
95 def tags(self):
75 def tags(self):
96 '''return a mapping of tag to node'''
76 '''return a mapping of tag to node'''
97 if not self.tagscache:
77 if not self.tagscache:
98 self.tagscache = {}
78 self.tagscache = {}
99 def addtag(self, k, n):
79 def addtag(self, k, n):
100 try:
80 try:
101 bin_n = bin(n)
81 bin_n = bin(n)
102 except TypeError:
82 except TypeError:
103 bin_n = ''
83 bin_n = ''
104 self.tagscache[k.strip()] = bin_n
84 self.tagscache[k.strip()] = bin_n
105
85
106 try:
86 try:
107 # read each head of the tags file, ending with the tip
87 # read each head of the tags file, ending with the tip
108 # and add each tag found to the map, with "newer" ones
88 # and add each tag found to the map, with "newer" ones
109 # taking precedence
89 # taking precedence
110 fl = self.file(".hgtags")
90 fl = self.file(".hgtags")
111 h = fl.heads()
91 h = fl.heads()
112 h.reverse()
92 h.reverse()
113 for r in h:
93 for r in h:
114 for l in fl.read(r).splitlines():
94 for l in fl.read(r).splitlines():
115 if l:
95 if l:
116 n, k = l.split(" ", 1)
96 n, k = l.split(" ", 1)
117 addtag(self, k, n)
97 addtag(self, k, n)
118 except KeyError:
98 except KeyError:
119 pass
99 pass
120
100
121 try:
101 try:
122 f = self.opener("localtags")
102 f = self.opener("localtags")
123 for l in f:
103 for l in f:
124 n, k = l.split(" ", 1)
104 n, k = l.split(" ", 1)
125 addtag(self, k, n)
105 addtag(self, k, n)
126 except IOError:
106 except IOError:
127 pass
107 pass
128
108
129 self.tagscache['tip'] = self.changelog.tip()
109 self.tagscache['tip'] = self.changelog.tip()
130
110
131 return self.tagscache
111 return self.tagscache
132
112
133 def tagslist(self):
113 def tagslist(self):
134 '''return a list of tags ordered by revision'''
114 '''return a list of tags ordered by revision'''
135 l = []
115 l = []
136 for t, n in self.tags().items():
116 for t, n in self.tags().items():
137 try:
117 try:
138 r = self.changelog.rev(n)
118 r = self.changelog.rev(n)
139 except:
119 except:
140 r = -2 # sort to the beginning of the list if unknown
120 r = -2 # sort to the beginning of the list if unknown
141 l.append((r, t, n))
121 l.append((r, t, n))
142 l.sort()
122 l.sort()
143 return [(t, n) for r, t, n in l]
123 return [(t, n) for r, t, n in l]
144
124
145 def nodetags(self, node):
125 def nodetags(self, node):
146 '''return the tags associated with a node'''
126 '''return the tags associated with a node'''
147 if not self.nodetagscache:
127 if not self.nodetagscache:
148 self.nodetagscache = {}
128 self.nodetagscache = {}
149 for t, n in self.tags().items():
129 for t, n in self.tags().items():
150 self.nodetagscache.setdefault(n, []).append(t)
130 self.nodetagscache.setdefault(n, []).append(t)
151 return self.nodetagscache.get(node, [])
131 return self.nodetagscache.get(node, [])
152
132
153 def lookup(self, key):
133 def lookup(self, key):
154 try:
134 try:
155 return self.tags()[key]
135 return self.tags()[key]
156 except KeyError:
136 except KeyError:
157 try:
137 try:
158 return self.changelog.lookup(key)
138 return self.changelog.lookup(key)
159 except:
139 except:
160 raise repo.RepoError(_("unknown revision '%s'") % key)
140 raise repo.RepoError(_("unknown revision '%s'") % key)
161
141
162 def dev(self):
142 def dev(self):
163 return os.stat(self.path).st_dev
143 return os.stat(self.path).st_dev
164
144
165 def local(self):
145 def local(self):
166 return True
146 return True
167
147
168 def join(self, f):
148 def join(self, f):
169 return os.path.join(self.path, f)
149 return os.path.join(self.path, f)
170
150
171 def wjoin(self, f):
151 def wjoin(self, f):
172 return os.path.join(self.root, f)
152 return os.path.join(self.root, f)
173
153
174 def file(self, f):
154 def file(self, f):
175 if f[0] == '/':
155 if f[0] == '/':
176 f = f[1:]
156 f = f[1:]
177 return filelog.filelog(self.opener, f)
157 return filelog.filelog(self.opener, f)
178
158
179 def getcwd(self):
159 def getcwd(self):
180 return self.dirstate.getcwd()
160 return self.dirstate.getcwd()
181
161
182 def wfile(self, f, mode='r'):
162 def wfile(self, f, mode='r'):
183 return self.wopener(f, mode)
163 return self.wopener(f, mode)
184
164
185 def wread(self, filename):
165 def wread(self, filename):
186 if self.encodepats == None:
166 if self.encodepats == None:
187 l = []
167 l = []
188 for pat, cmd in self.ui.configitems("encode"):
168 for pat, cmd in self.ui.configitems("encode"):
189 mf = util.matcher("", "/", [pat], [], [])[1]
169 mf = util.matcher("", "/", [pat], [], [])[1]
190 l.append((mf, cmd))
170 l.append((mf, cmd))
191 self.encodepats = l
171 self.encodepats = l
192
172
193 data = self.wopener(filename, 'r').read()
173 data = self.wopener(filename, 'r').read()
194
174
195 for mf, cmd in self.encodepats:
175 for mf, cmd in self.encodepats:
196 if mf(filename):
176 if mf(filename):
197 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
177 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
198 data = util.filter(data, cmd)
178 data = util.filter(data, cmd)
199 break
179 break
200
180
201 return data
181 return data
202
182
203 def wwrite(self, filename, data, fd=None):
183 def wwrite(self, filename, data, fd=None):
204 if self.decodepats == None:
184 if self.decodepats == None:
205 l = []
185 l = []
206 for pat, cmd in self.ui.configitems("decode"):
186 for pat, cmd in self.ui.configitems("decode"):
207 mf = util.matcher("", "/", [pat], [], [])[1]
187 mf = util.matcher("", "/", [pat], [], [])[1]
208 l.append((mf, cmd))
188 l.append((mf, cmd))
209 self.decodepats = l
189 self.decodepats = l
210
190
211 for mf, cmd in self.decodepats:
191 for mf, cmd in self.decodepats:
212 if mf(filename):
192 if mf(filename):
213 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
214 data = util.filter(data, cmd)
194 data = util.filter(data, cmd)
215 break
195 break
216
196
217 if fd:
197 if fd:
218 return fd.write(data)
198 return fd.write(data)
219 return self.wopener(filename, 'w').write(data)
199 return self.wopener(filename, 'w').write(data)
220
200
221 def transaction(self):
201 def transaction(self):
222 tr = self.transhandle
202 tr = self.transhandle
223 if tr != None and tr.running():
203 if tr != None and tr.running():
224 return tr.nest()
204 return tr.nest()
225
205
226 # save dirstate for undo
206 # save dirstate for undo
227 try:
207 try:
228 ds = self.opener("dirstate").read()
208 ds = self.opener("dirstate").read()
229 except IOError:
209 except IOError:
230 ds = ""
210 ds = ""
231 self.opener("journal.dirstate", "w").write(ds)
211 self.opener("journal.dirstate", "w").write(ds)
232
212
233 tr = transaction.transaction(self.ui.warn, self.opener,
213 tr = transaction.transaction(self.ui.warn, self.opener,
234 self.join("journal"),
214 self.join("journal"),
235 aftertrans(self.path))
215 aftertrans(self.path))
236 self.transhandle = tr
216 self.transhandle = tr
237 return tr
217 return tr
238
218
239 def recover(self):
219 def recover(self):
240 l = self.lock()
220 l = self.lock()
241 if os.path.exists(self.join("journal")):
221 if os.path.exists(self.join("journal")):
242 self.ui.status(_("rolling back interrupted transaction\n"))
222 self.ui.status(_("rolling back interrupted transaction\n"))
243 transaction.rollback(self.opener, self.join("journal"))
223 transaction.rollback(self.opener, self.join("journal"))
244 self.reload()
224 self.reload()
245 return True
225 return True
246 else:
226 else:
247 self.ui.warn(_("no interrupted transaction available\n"))
227 self.ui.warn(_("no interrupted transaction available\n"))
248 return False
228 return False
249
229
250 def undo(self, wlock=None):
230 def undo(self, wlock=None):
251 if not wlock:
231 if not wlock:
252 wlock = self.wlock()
232 wlock = self.wlock()
253 l = self.lock()
233 l = self.lock()
254 if os.path.exists(self.join("undo")):
234 if os.path.exists(self.join("undo")):
255 self.ui.status(_("rolling back last transaction\n"))
235 self.ui.status(_("rolling back last transaction\n"))
256 transaction.rollback(self.opener, self.join("undo"))
236 transaction.rollback(self.opener, self.join("undo"))
257 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
237 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
258 self.reload()
238 self.reload()
259 self.wreload()
239 self.wreload()
260 else:
240 else:
261 self.ui.warn(_("no undo information available\n"))
241 self.ui.warn(_("no undo information available\n"))
262
242
263 def wreload(self):
243 def wreload(self):
264 self.dirstate.read()
244 self.dirstate.read()
265
245
266 def reload(self):
246 def reload(self):
267 self.changelog.load()
247 self.changelog.load()
268 self.manifest.load()
248 self.manifest.load()
269 self.tagscache = None
249 self.tagscache = None
270 self.nodetagscache = None
250 self.nodetagscache = None
271
251
272 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
252 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
273 try:
253 try:
274 l = lock.lock(self.join(lockname), 0, releasefn)
254 l = lock.lock(self.join(lockname), 0, releasefn)
275 except lock.LockHeld, inst:
255 except lock.LockHeld, inst:
276 if not wait:
256 if not wait:
277 raise inst
257 raise inst
278 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
258 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
279 try:
259 try:
280 # default to 600 seconds timeout
260 # default to 600 seconds timeout
281 l = lock.lock(self.join(lockname),
261 l = lock.lock(self.join(lockname),
282 int(self.ui.config("ui", "timeout") or 600),
262 int(self.ui.config("ui", "timeout") or 600),
283 releasefn)
263 releasefn)
284 except lock.LockHeld, inst:
264 except lock.LockHeld, inst:
285 raise util.Abort(_("timeout while waiting for "
265 raise util.Abort(_("timeout while waiting for "
286 "lock held by %s") % inst.args[0])
266 "lock held by %s") % inst.args[0])
287 if acquirefn:
267 if acquirefn:
288 acquirefn()
268 acquirefn()
289 return l
269 return l
290
270
291 def lock(self, wait=1):
271 def lock(self, wait=1):
292 return self.do_lock("lock", wait, acquirefn=self.reload)
272 return self.do_lock("lock", wait, acquirefn=self.reload)
293
273
294 def wlock(self, wait=1):
274 def wlock(self, wait=1):
295 return self.do_lock("wlock", wait,
275 return self.do_lock("wlock", wait,
296 self.dirstate.write,
276 self.dirstate.write,
297 self.wreload)
277 self.wreload)
298
278
299 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
279 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
300 "determine whether a new filenode is needed"
280 "determine whether a new filenode is needed"
301 fp1 = manifest1.get(filename, nullid)
281 fp1 = manifest1.get(filename, nullid)
302 fp2 = manifest2.get(filename, nullid)
282 fp2 = manifest2.get(filename, nullid)
303
283
304 if fp2 != nullid:
284 if fp2 != nullid:
305 # is one parent an ancestor of the other?
285 # is one parent an ancestor of the other?
306 fpa = filelog.ancestor(fp1, fp2)
286 fpa = filelog.ancestor(fp1, fp2)
307 if fpa == fp1:
287 if fpa == fp1:
308 fp1, fp2 = fp2, nullid
288 fp1, fp2 = fp2, nullid
309 elif fpa == fp2:
289 elif fpa == fp2:
310 fp2 = nullid
290 fp2 = nullid
311
291
312 # is the file unmodified from the parent? report existing entry
292 # is the file unmodified from the parent? report existing entry
313 if fp2 == nullid and text == filelog.read(fp1):
293 if fp2 == nullid and text == filelog.read(fp1):
314 return (fp1, None, None)
294 return (fp1, None, None)
315
295
316 return (None, fp1, fp2)
296 return (None, fp1, fp2)
317
297
318 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
298 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
319 orig_parent = self.dirstate.parents()[0] or nullid
299 orig_parent = self.dirstate.parents()[0] or nullid
320 p1 = p1 or self.dirstate.parents()[0] or nullid
300 p1 = p1 or self.dirstate.parents()[0] or nullid
321 p2 = p2 or self.dirstate.parents()[1] or nullid
301 p2 = p2 or self.dirstate.parents()[1] or nullid
322 c1 = self.changelog.read(p1)
302 c1 = self.changelog.read(p1)
323 c2 = self.changelog.read(p2)
303 c2 = self.changelog.read(p2)
324 m1 = self.manifest.read(c1[0])
304 m1 = self.manifest.read(c1[0])
325 mf1 = self.manifest.readflags(c1[0])
305 mf1 = self.manifest.readflags(c1[0])
326 m2 = self.manifest.read(c2[0])
306 m2 = self.manifest.read(c2[0])
327 changed = []
307 changed = []
328
308
329 if orig_parent == p1:
309 if orig_parent == p1:
330 update_dirstate = 1
310 update_dirstate = 1
331 else:
311 else:
332 update_dirstate = 0
312 update_dirstate = 0
333
313
334 if not wlock:
314 if not wlock:
335 wlock = self.wlock()
315 wlock = self.wlock()
336 l = self.lock()
316 l = self.lock()
337 tr = self.transaction()
317 tr = self.transaction()
338 mm = m1.copy()
318 mm = m1.copy()
339 mfm = mf1.copy()
319 mfm = mf1.copy()
340 linkrev = self.changelog.count()
320 linkrev = self.changelog.count()
341 for f in files:
321 for f in files:
342 try:
322 try:
343 t = self.wread(f)
323 t = self.wread(f)
344 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
324 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
345 r = self.file(f)
325 r = self.file(f)
346 mfm[f] = tm
326 mfm[f] = tm
347
327
348 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
328 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
349 if entry:
329 if entry:
350 mm[f] = entry
330 mm[f] = entry
351 continue
331 continue
352
332
353 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
333 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
354 changed.append(f)
334 changed.append(f)
355 if update_dirstate:
335 if update_dirstate:
356 self.dirstate.update([f], "n")
336 self.dirstate.update([f], "n")
357 except IOError:
337 except IOError:
358 try:
338 try:
359 del mm[f]
339 del mm[f]
360 del mfm[f]
340 del mfm[f]
361 if update_dirstate:
341 if update_dirstate:
362 self.dirstate.forget([f])
342 self.dirstate.forget([f])
363 except:
343 except:
364 # deleted from p2?
344 # deleted from p2?
365 pass
345 pass
366
346
367 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
347 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
368 user = user or self.ui.username()
348 user = user or self.ui.username()
369 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
349 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
370 tr.close()
350 tr.close()
371 if update_dirstate:
351 if update_dirstate:
372 self.dirstate.setparents(n, nullid)
352 self.dirstate.setparents(n, nullid)
373
353
374 def commit(self, files=None, text="", user=None, date=None,
354 def commit(self, files=None, text="", user=None, date=None,
375 match=util.always, force=False, lock=None, wlock=None):
355 match=util.always, force=False, lock=None, wlock=None):
376 commit = []
356 commit = []
377 remove = []
357 remove = []
378 changed = []
358 changed = []
379
359
380 if files:
360 if files:
381 for f in files:
361 for f in files:
382 s = self.dirstate.state(f)
362 s = self.dirstate.state(f)
383 if s in 'nmai':
363 if s in 'nmai':
384 commit.append(f)
364 commit.append(f)
385 elif s == 'r':
365 elif s == 'r':
386 remove.append(f)
366 remove.append(f)
387 else:
367 else:
388 self.ui.warn(_("%s not tracked!\n") % f)
368 self.ui.warn(_("%s not tracked!\n") % f)
389 else:
369 else:
390 modified, added, removed, deleted, unknown = self.changes(match=match)
370 modified, added, removed, deleted, unknown = self.changes(match=match)
391 commit = modified + added
371 commit = modified + added
392 remove = removed
372 remove = removed
393
373
394 p1, p2 = self.dirstate.parents()
374 p1, p2 = self.dirstate.parents()
395 c1 = self.changelog.read(p1)
375 c1 = self.changelog.read(p1)
396 c2 = self.changelog.read(p2)
376 c2 = self.changelog.read(p2)
397 m1 = self.manifest.read(c1[0])
377 m1 = self.manifest.read(c1[0])
398 mf1 = self.manifest.readflags(c1[0])
378 mf1 = self.manifest.readflags(c1[0])
399 m2 = self.manifest.read(c2[0])
379 m2 = self.manifest.read(c2[0])
400
380
401 if not commit and not remove and not force and p2 == nullid:
381 if not commit and not remove and not force and p2 == nullid:
402 self.ui.status(_("nothing changed\n"))
382 self.ui.status(_("nothing changed\n"))
403 return None
383 return None
404
384
405 xp1 = hex(p1)
385 xp1 = hex(p1)
406 if p2 == nullid: xp2 = ''
386 if p2 == nullid: xp2 = ''
407 else: xp2 = hex(p2)
387 else: xp2 = hex(p2)
408
388
409 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
389 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
410
390
411 if not wlock:
391 if not wlock:
412 wlock = self.wlock()
392 wlock = self.wlock()
413 if not lock:
393 if not lock:
414 lock = self.lock()
394 lock = self.lock()
415 tr = self.transaction()
395 tr = self.transaction()
416
396
417 # check in files
397 # check in files
418 new = {}
398 new = {}
419 linkrev = self.changelog.count()
399 linkrev = self.changelog.count()
420 commit.sort()
400 commit.sort()
421 for f in commit:
401 for f in commit:
422 self.ui.note(f + "\n")
402 self.ui.note(f + "\n")
423 try:
403 try:
424 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
404 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
425 t = self.wread(f)
405 t = self.wread(f)
426 except IOError:
406 except IOError:
427 self.ui.warn(_("trouble committing %s!\n") % f)
407 self.ui.warn(_("trouble committing %s!\n") % f)
428 raise
408 raise
429
409
430 r = self.file(f)
410 r = self.file(f)
431
411
432 meta = {}
412 meta = {}
433 cp = self.dirstate.copied(f)
413 cp = self.dirstate.copied(f)
434 if cp:
414 if cp:
435 meta["copy"] = cp
415 meta["copy"] = cp
436 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
416 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
437 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
417 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
438 fp1, fp2 = nullid, nullid
418 fp1, fp2 = nullid, nullid
439 else:
419 else:
440 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
420 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
441 if entry:
421 if entry:
442 new[f] = entry
422 new[f] = entry
443 continue
423 continue
444
424
445 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
425 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
446 # remember what we've added so that we can later calculate
426 # remember what we've added so that we can later calculate
447 # the files to pull from a set of changesets
427 # the files to pull from a set of changesets
448 changed.append(f)
428 changed.append(f)
449
429
450 # update manifest
430 # update manifest
451 m1 = m1.copy()
431 m1 = m1.copy()
452 m1.update(new)
432 m1.update(new)
453 for f in remove:
433 for f in remove:
454 if f in m1:
434 if f in m1:
455 del m1[f]
435 del m1[f]
456 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
436 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
457 (new, remove))
437 (new, remove))
458
438
459 # add changeset
439 # add changeset
460 new = new.keys()
440 new = new.keys()
461 new.sort()
441 new.sort()
462
442
463 if not text:
443 if not text:
464 edittext = [""]
444 edittext = [""]
465 if p2 != nullid:
445 if p2 != nullid:
466 edittext.append("HG: branch merge")
446 edittext.append("HG: branch merge")
467 edittext.extend(["HG: changed %s" % f for f in changed])
447 edittext.extend(["HG: changed %s" % f for f in changed])
468 edittext.extend(["HG: removed %s" % f for f in remove])
448 edittext.extend(["HG: removed %s" % f for f in remove])
469 if not changed and not remove:
449 if not changed and not remove:
470 edittext.append("HG: no files changed")
450 edittext.append("HG: no files changed")
471 edittext.append("")
451 edittext.append("")
472 # run editor in the repository root
452 # run editor in the repository root
473 olddir = os.getcwd()
453 olddir = os.getcwd()
474 os.chdir(self.root)
454 os.chdir(self.root)
475 edittext = self.ui.edit("\n".join(edittext))
455 edittext = self.ui.edit("\n".join(edittext))
476 os.chdir(olddir)
456 os.chdir(olddir)
477 if not edittext.rstrip():
457 if not edittext.rstrip():
478 return None
458 return None
479 text = edittext
459 text = edittext
480
460
481 user = user or self.ui.username()
461 user = user or self.ui.username()
482 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
462 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
483 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
463 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
484 parent2=xp2)
464 parent2=xp2)
485 tr.close()
465 tr.close()
486
466
487 self.dirstate.setparents(n)
467 self.dirstate.setparents(n)
488 self.dirstate.update(new, "n")
468 self.dirstate.update(new, "n")
489 self.dirstate.forget(remove)
469 self.dirstate.forget(remove)
490
470
491 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
471 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
492 return n
472 return n
493
473
494 def walk(self, node=None, files=[], match=util.always):
474 def walk(self, node=None, files=[], match=util.always):
495 if node:
475 if node:
496 fdict = dict.fromkeys(files)
476 fdict = dict.fromkeys(files)
497 for fn in self.manifest.read(self.changelog.read(node)[0]):
477 for fn in self.manifest.read(self.changelog.read(node)[0]):
498 fdict.pop(fn, None)
478 fdict.pop(fn, None)
499 if match(fn):
479 if match(fn):
500 yield 'm', fn
480 yield 'm', fn
501 for fn in fdict:
481 for fn in fdict:
502 self.ui.warn(_('%s: No such file in rev %s\n') % (
482 self.ui.warn(_('%s: No such file in rev %s\n') % (
503 util.pathto(self.getcwd(), fn), short(node)))
483 util.pathto(self.getcwd(), fn), short(node)))
504 else:
484 else:
505 for src, fn in self.dirstate.walk(files, match):
485 for src, fn in self.dirstate.walk(files, match):
506 yield src, fn
486 yield src, fn
507
487
508 def changes(self, node1=None, node2=None, files=[], match=util.always,
488 def changes(self, node1=None, node2=None, files=[], match=util.always,
509 wlock=None):
489 wlock=None):
510 """return changes between two nodes or node and working directory
490 """return changes between two nodes or node and working directory
511
491
512 If node1 is None, use the first dirstate parent instead.
492 If node1 is None, use the first dirstate parent instead.
513 If node2 is None, compare node1 with working directory.
493 If node2 is None, compare node1 with working directory.
514 """
494 """
515
495
516 def fcmp(fn, mf):
496 def fcmp(fn, mf):
517 t1 = self.wread(fn)
497 t1 = self.wread(fn)
518 t2 = self.file(fn).read(mf.get(fn, nullid))
498 t2 = self.file(fn).read(mf.get(fn, nullid))
519 return cmp(t1, t2)
499 return cmp(t1, t2)
520
500
521 def mfmatches(node):
501 def mfmatches(node):
522 change = self.changelog.read(node)
502 change = self.changelog.read(node)
523 mf = dict(self.manifest.read(change[0]))
503 mf = dict(self.manifest.read(change[0]))
524 for fn in mf.keys():
504 for fn in mf.keys():
525 if not match(fn):
505 if not match(fn):
526 del mf[fn]
506 del mf[fn]
527 return mf
507 return mf
528
508
529 if node1:
509 if node1:
530 # read the manifest from node1 before the manifest from node2,
510 # read the manifest from node1 before the manifest from node2,
531 # so that we'll hit the manifest cache if we're going through
511 # so that we'll hit the manifest cache if we're going through
532 # all the revisions in parent->child order.
512 # all the revisions in parent->child order.
533 mf1 = mfmatches(node1)
513 mf1 = mfmatches(node1)
534
514
535 # are we comparing the working directory?
515 # are we comparing the working directory?
536 if not node2:
516 if not node2:
537 if not wlock:
517 if not wlock:
538 try:
518 try:
539 wlock = self.wlock(wait=0)
519 wlock = self.wlock(wait=0)
540 except lock.LockException:
520 except lock.LockException:
541 wlock = None
521 wlock = None
542 lookup, modified, added, removed, deleted, unknown = (
522 lookup, modified, added, removed, deleted, unknown = (
543 self.dirstate.changes(files, match))
523 self.dirstate.changes(files, match))
544
524
545 # are we comparing working dir against its parent?
525 # are we comparing working dir against its parent?
546 if not node1:
526 if not node1:
547 if lookup:
527 if lookup:
548 # do a full compare of any files that might have changed
528 # do a full compare of any files that might have changed
549 mf2 = mfmatches(self.dirstate.parents()[0])
529 mf2 = mfmatches(self.dirstate.parents()[0])
550 for f in lookup:
530 for f in lookup:
551 if fcmp(f, mf2):
531 if fcmp(f, mf2):
552 modified.append(f)
532 modified.append(f)
553 elif wlock is not None:
533 elif wlock is not None:
554 self.dirstate.update([f], "n")
534 self.dirstate.update([f], "n")
555 else:
535 else:
556 # we are comparing working dir against non-parent
536 # we are comparing working dir against non-parent
557 # generate a pseudo-manifest for the working dir
537 # generate a pseudo-manifest for the working dir
558 mf2 = mfmatches(self.dirstate.parents()[0])
538 mf2 = mfmatches(self.dirstate.parents()[0])
559 for f in lookup + modified + added:
539 for f in lookup + modified + added:
560 mf2[f] = ""
540 mf2[f] = ""
561 for f in removed:
541 for f in removed:
562 if f in mf2:
542 if f in mf2:
563 del mf2[f]
543 del mf2[f]
564 else:
544 else:
565 # we are comparing two revisions
545 # we are comparing two revisions
566 deleted, unknown = [], []
546 deleted, unknown = [], []
567 mf2 = mfmatches(node2)
547 mf2 = mfmatches(node2)
568
548
569 if node1:
549 if node1:
570 # flush lists from dirstate before comparing manifests
550 # flush lists from dirstate before comparing manifests
571 modified, added = [], []
551 modified, added = [], []
572
552
573 for fn in mf2:
553 for fn in mf2:
574 if mf1.has_key(fn):
554 if mf1.has_key(fn):
575 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
555 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
576 modified.append(fn)
556 modified.append(fn)
577 del mf1[fn]
557 del mf1[fn]
578 else:
558 else:
579 added.append(fn)
559 added.append(fn)
580
560
581 removed = mf1.keys()
561 removed = mf1.keys()
582
562
583 # sort and return results:
563 # sort and return results:
584 for l in modified, added, removed, deleted, unknown:
564 for l in modified, added, removed, deleted, unknown:
585 l.sort()
565 l.sort()
586 return (modified, added, removed, deleted, unknown)
566 return (modified, added, removed, deleted, unknown)
587
567
588 def add(self, list, wlock=None):
568 def add(self, list, wlock=None):
589 if not wlock:
569 if not wlock:
590 wlock = self.wlock()
570 wlock = self.wlock()
591 for f in list:
571 for f in list:
592 p = self.wjoin(f)
572 p = self.wjoin(f)
593 if not os.path.exists(p):
573 if not os.path.exists(p):
594 self.ui.warn(_("%s does not exist!\n") % f)
574 self.ui.warn(_("%s does not exist!\n") % f)
595 elif not os.path.isfile(p):
575 elif not os.path.isfile(p):
596 self.ui.warn(_("%s not added: only files supported currently\n")
576 self.ui.warn(_("%s not added: only files supported currently\n")
597 % f)
577 % f)
598 elif self.dirstate.state(f) in 'an':
578 elif self.dirstate.state(f) in 'an':
599 self.ui.warn(_("%s already tracked!\n") % f)
579 self.ui.warn(_("%s already tracked!\n") % f)
600 else:
580 else:
601 self.dirstate.update([f], "a")
581 self.dirstate.update([f], "a")
602
582
603 def forget(self, list, wlock=None):
583 def forget(self, list, wlock=None):
604 if not wlock:
584 if not wlock:
605 wlock = self.wlock()
585 wlock = self.wlock()
606 for f in list:
586 for f in list:
607 if self.dirstate.state(f) not in 'ai':
587 if self.dirstate.state(f) not in 'ai':
608 self.ui.warn(_("%s not added!\n") % f)
588 self.ui.warn(_("%s not added!\n") % f)
609 else:
589 else:
610 self.dirstate.forget([f])
590 self.dirstate.forget([f])
611
591
612 def remove(self, list, unlink=False, wlock=None):
592 def remove(self, list, unlink=False, wlock=None):
613 if unlink:
593 if unlink:
614 for f in list:
594 for f in list:
615 try:
595 try:
616 util.unlink(self.wjoin(f))
596 util.unlink(self.wjoin(f))
617 except OSError, inst:
597 except OSError, inst:
618 if inst.errno != errno.ENOENT:
598 if inst.errno != errno.ENOENT:
619 raise
599 raise
620 if not wlock:
600 if not wlock:
621 wlock = self.wlock()
601 wlock = self.wlock()
622 for f in list:
602 for f in list:
623 p = self.wjoin(f)
603 p = self.wjoin(f)
624 if os.path.exists(p):
604 if os.path.exists(p):
625 self.ui.warn(_("%s still exists!\n") % f)
605 self.ui.warn(_("%s still exists!\n") % f)
626 elif self.dirstate.state(f) == 'a':
606 elif self.dirstate.state(f) == 'a':
627 self.dirstate.forget([f])
607 self.dirstate.forget([f])
628 elif f not in self.dirstate:
608 elif f not in self.dirstate:
629 self.ui.warn(_("%s not tracked!\n") % f)
609 self.ui.warn(_("%s not tracked!\n") % f)
630 else:
610 else:
631 self.dirstate.update([f], "r")
611 self.dirstate.update([f], "r")
632
612
633 def undelete(self, list, wlock=None):
613 def undelete(self, list, wlock=None):
634 p = self.dirstate.parents()[0]
614 p = self.dirstate.parents()[0]
635 mn = self.changelog.read(p)[0]
615 mn = self.changelog.read(p)[0]
636 mf = self.manifest.readflags(mn)
616 mf = self.manifest.readflags(mn)
637 m = self.manifest.read(mn)
617 m = self.manifest.read(mn)
638 if not wlock:
618 if not wlock:
639 wlock = self.wlock()
619 wlock = self.wlock()
640 for f in list:
620 for f in list:
641 if self.dirstate.state(f) not in "r":
621 if self.dirstate.state(f) not in "r":
642 self.ui.warn("%s not removed!\n" % f)
622 self.ui.warn("%s not removed!\n" % f)
643 else:
623 else:
644 t = self.file(f).read(m[f])
624 t = self.file(f).read(m[f])
645 self.wwrite(f, t)
625 self.wwrite(f, t)
646 util.set_exec(self.wjoin(f), mf[f])
626 util.set_exec(self.wjoin(f), mf[f])
647 self.dirstate.update([f], "n")
627 self.dirstate.update([f], "n")
648
628
649 def copy(self, source, dest, wlock=None):
629 def copy(self, source, dest, wlock=None):
650 p = self.wjoin(dest)
630 p = self.wjoin(dest)
651 if not os.path.exists(p):
631 if not os.path.exists(p):
652 self.ui.warn(_("%s does not exist!\n") % dest)
632 self.ui.warn(_("%s does not exist!\n") % dest)
653 elif not os.path.isfile(p):
633 elif not os.path.isfile(p):
654 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
634 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
655 else:
635 else:
656 if not wlock:
636 if not wlock:
657 wlock = self.wlock()
637 wlock = self.wlock()
658 if self.dirstate.state(dest) == '?':
638 if self.dirstate.state(dest) == '?':
659 self.dirstate.update([dest], "a")
639 self.dirstate.update([dest], "a")
660 self.dirstate.copy(source, dest)
640 self.dirstate.copy(source, dest)
661
641
662 def heads(self, start=None):
642 def heads(self, start=None):
663 heads = self.changelog.heads(start)
643 heads = self.changelog.heads(start)
664 # sort the output in rev descending order
644 # sort the output in rev descending order
665 heads = [(-self.changelog.rev(h), h) for h in heads]
645 heads = [(-self.changelog.rev(h), h) for h in heads]
666 heads.sort()
646 heads.sort()
667 return [n for (r, n) in heads]
647 return [n for (r, n) in heads]
668
648
669 # branchlookup returns a dict giving a list of branches for
649 # branchlookup returns a dict giving a list of branches for
670 # each head. A branch is defined as the tag of a node or
650 # each head. A branch is defined as the tag of a node or
671 # the branch of the node's parents. If a node has multiple
651 # the branch of the node's parents. If a node has multiple
672 # branch tags, tags are eliminated if they are visible from other
652 # branch tags, tags are eliminated if they are visible from other
673 # branch tags.
653 # branch tags.
674 #
654 #
675 # So, for this graph: a->b->c->d->e
655 # So, for this graph: a->b->c->d->e
676 # \ /
656 # \ /
677 # aa -----/
657 # aa -----/
678 # a has tag 2.6.12
658 # a has tag 2.6.12
679 # d has tag 2.6.13
659 # d has tag 2.6.13
680 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
660 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
681 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
661 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
682 # from the list.
662 # from the list.
683 #
663 #
684 # It is possible that more than one head will have the same branch tag.
664 # It is possible that more than one head will have the same branch tag.
685 # callers need to check the result for multiple heads under the same
665 # callers need to check the result for multiple heads under the same
686 # branch tag if that is a problem for them (ie checkout of a specific
666 # branch tag if that is a problem for them (ie checkout of a specific
687 # branch).
667 # branch).
688 #
668 #
689 # passing in a specific branch will limit the depth of the search
669 # passing in a specific branch will limit the depth of the search
690 # through the parents. It won't limit the branches returned in the
670 # through the parents. It won't limit the branches returned in the
691 # result though.
671 # result though.
692 def branchlookup(self, heads=None, branch=None):
672 def branchlookup(self, heads=None, branch=None):
693 if not heads:
673 if not heads:
694 heads = self.heads()
674 heads = self.heads()
695 headt = [ h for h in heads ]
675 headt = [ h for h in heads ]
696 chlog = self.changelog
676 chlog = self.changelog
697 branches = {}
677 branches = {}
698 merges = []
678 merges = []
699 seenmerge = {}
679 seenmerge = {}
700
680
701 # traverse the tree once for each head, recording in the branches
681 # traverse the tree once for each head, recording in the branches
702 # dict which tags are visible from this head. The branches
682 # dict which tags are visible from this head. The branches
703 # dict also records which tags are visible from each tag
683 # dict also records which tags are visible from each tag
704 # while we traverse.
684 # while we traverse.
705 while headt or merges:
685 while headt or merges:
706 if merges:
686 if merges:
707 n, found = merges.pop()
687 n, found = merges.pop()
708 visit = [n]
688 visit = [n]
709 else:
689 else:
710 h = headt.pop()
690 h = headt.pop()
711 visit = [h]
691 visit = [h]
712 found = [h]
692 found = [h]
713 seen = {}
693 seen = {}
714 while visit:
694 while visit:
715 n = visit.pop()
695 n = visit.pop()
716 if n in seen:
696 if n in seen:
717 continue
697 continue
718 pp = chlog.parents(n)
698 pp = chlog.parents(n)
719 tags = self.nodetags(n)
699 tags = self.nodetags(n)
720 if tags:
700 if tags:
721 for x in tags:
701 for x in tags:
722 if x == 'tip':
702 if x == 'tip':
723 continue
703 continue
724 for f in found:
704 for f in found:
725 branches.setdefault(f, {})[n] = 1
705 branches.setdefault(f, {})[n] = 1
726 branches.setdefault(n, {})[n] = 1
706 branches.setdefault(n, {})[n] = 1
727 break
707 break
728 if n not in found:
708 if n not in found:
729 found.append(n)
709 found.append(n)
730 if branch in tags:
710 if branch in tags:
731 continue
711 continue
732 seen[n] = 1
712 seen[n] = 1
733 if pp[1] != nullid and n not in seenmerge:
713 if pp[1] != nullid and n not in seenmerge:
734 merges.append((pp[1], [x for x in found]))
714 merges.append((pp[1], [x for x in found]))
735 seenmerge[n] = 1
715 seenmerge[n] = 1
736 if pp[0] != nullid:
716 if pp[0] != nullid:
737 visit.append(pp[0])
717 visit.append(pp[0])
738 # traverse the branches dict, eliminating branch tags from each
718 # traverse the branches dict, eliminating branch tags from each
739 # head that are visible from another branch tag for that head.
719 # head that are visible from another branch tag for that head.
740 out = {}
720 out = {}
741 viscache = {}
721 viscache = {}
742 for h in heads:
722 for h in heads:
743 def visible(node):
723 def visible(node):
744 if node in viscache:
724 if node in viscache:
745 return viscache[node]
725 return viscache[node]
746 ret = {}
726 ret = {}
747 visit = [node]
727 visit = [node]
748 while visit:
728 while visit:
749 x = visit.pop()
729 x = visit.pop()
750 if x in viscache:
730 if x in viscache:
751 ret.update(viscache[x])
731 ret.update(viscache[x])
752 elif x not in ret:
732 elif x not in ret:
753 ret[x] = 1
733 ret[x] = 1
754 if x in branches:
734 if x in branches:
755 visit[len(visit):] = branches[x].keys()
735 visit[len(visit):] = branches[x].keys()
756 viscache[node] = ret
736 viscache[node] = ret
757 return ret
737 return ret
758 if h not in branches:
738 if h not in branches:
759 continue
739 continue
760 # O(n^2), but somewhat limited. This only searches the
740 # O(n^2), but somewhat limited. This only searches the
761 # tags visible from a specific head, not all the tags in the
741 # tags visible from a specific head, not all the tags in the
762 # whole repo.
742 # whole repo.
763 for b in branches[h]:
743 for b in branches[h]:
764 vis = False
744 vis = False
765 for bb in branches[h].keys():
745 for bb in branches[h].keys():
766 if b != bb:
746 if b != bb:
767 if b in visible(bb):
747 if b in visible(bb):
768 vis = True
748 vis = True
769 break
749 break
770 if not vis:
750 if not vis:
771 l = out.setdefault(h, [])
751 l = out.setdefault(h, [])
772 l[len(l):] = self.nodetags(b)
752 l[len(l):] = self.nodetags(b)
773 return out
753 return out
774
754
775 def branches(self, nodes):
755 def branches(self, nodes):
776 if not nodes:
756 if not nodes:
777 nodes = [self.changelog.tip()]
757 nodes = [self.changelog.tip()]
778 b = []
758 b = []
779 for n in nodes:
759 for n in nodes:
780 t = n
760 t = n
781 while n:
761 while n:
782 p = self.changelog.parents(n)
762 p = self.changelog.parents(n)
783 if p[1] != nullid or p[0] == nullid:
763 if p[1] != nullid or p[0] == nullid:
784 b.append((t, n, p[0], p[1]))
764 b.append((t, n, p[0], p[1]))
785 break
765 break
786 n = p[0]
766 n = p[0]
787 return b
767 return b
788
768
789 def between(self, pairs):
769 def between(self, pairs):
790 r = []
770 r = []
791
771
792 for top, bottom in pairs:
772 for top, bottom in pairs:
793 n, l, i = top, [], 0
773 n, l, i = top, [], 0
794 f = 1
774 f = 1
795
775
796 while n != bottom:
776 while n != bottom:
797 p = self.changelog.parents(n)[0]
777 p = self.changelog.parents(n)[0]
798 if i == f:
778 if i == f:
799 l.append(n)
779 l.append(n)
800 f = f * 2
780 f = f * 2
801 n = p
781 n = p
802 i += 1
782 i += 1
803
783
804 r.append(l)
784 r.append(l)
805
785
806 return r
786 return r
807
787
808 def findincoming(self, remote, base=None, heads=None):
788 def findincoming(self, remote, base=None, heads=None):
809 m = self.changelog.nodemap
789 m = self.changelog.nodemap
810 search = []
790 search = []
811 fetch = {}
791 fetch = {}
812 seen = {}
792 seen = {}
813 seenbranch = {}
793 seenbranch = {}
814 if base == None:
794 if base == None:
815 base = {}
795 base = {}
816
796
817 # assume we're closer to the tip than the root
797 # assume we're closer to the tip than the root
818 # and start by examining the heads
798 # and start by examining the heads
819 self.ui.status(_("searching for changes\n"))
799 self.ui.status(_("searching for changes\n"))
820
800
821 if not heads:
801 if not heads:
822 heads = remote.heads()
802 heads = remote.heads()
823
803
824 unknown = []
804 unknown = []
825 for h in heads:
805 for h in heads:
826 if h not in m:
806 if h not in m:
827 unknown.append(h)
807 unknown.append(h)
828 else:
808 else:
829 base[h] = 1
809 base[h] = 1
830
810
831 if not unknown:
811 if not unknown:
832 return None
812 return None
833
813
834 rep = {}
814 rep = {}
835 reqcnt = 0
815 reqcnt = 0
836
816
837 # search through remote branches
817 # search through remote branches
838 # a 'branch' here is a linear segment of history, with four parts:
818 # a 'branch' here is a linear segment of history, with four parts:
839 # head, root, first parent, second parent
819 # head, root, first parent, second parent
840 # (a branch always has two parents (or none) by definition)
820 # (a branch always has two parents (or none) by definition)
841 unknown = remote.branches(unknown)
821 unknown = remote.branches(unknown)
842 while unknown:
822 while unknown:
843 r = []
823 r = []
844 while unknown:
824 while unknown:
845 n = unknown.pop(0)
825 n = unknown.pop(0)
846 if n[0] in seen:
826 if n[0] in seen:
847 continue
827 continue
848
828
849 self.ui.debug(_("examining %s:%s\n")
829 self.ui.debug(_("examining %s:%s\n")
850 % (short(n[0]), short(n[1])))
830 % (short(n[0]), short(n[1])))
851 if n[0] == nullid:
831 if n[0] == nullid:
852 break
832 break
853 if n in seenbranch:
833 if n in seenbranch:
854 self.ui.debug(_("branch already found\n"))
834 self.ui.debug(_("branch already found\n"))
855 continue
835 continue
856 if n[1] and n[1] in m: # do we know the base?
836 if n[1] and n[1] in m: # do we know the base?
857 self.ui.debug(_("found incomplete branch %s:%s\n")
837 self.ui.debug(_("found incomplete branch %s:%s\n")
858 % (short(n[0]), short(n[1])))
838 % (short(n[0]), short(n[1])))
859 search.append(n) # schedule branch range for scanning
839 search.append(n) # schedule branch range for scanning
860 seenbranch[n] = 1
840 seenbranch[n] = 1
861 else:
841 else:
862 if n[1] not in seen and n[1] not in fetch:
842 if n[1] not in seen and n[1] not in fetch:
863 if n[2] in m and n[3] in m:
843 if n[2] in m and n[3] in m:
864 self.ui.debug(_("found new changeset %s\n") %
844 self.ui.debug(_("found new changeset %s\n") %
865 short(n[1]))
845 short(n[1]))
866 fetch[n[1]] = 1 # earliest unknown
846 fetch[n[1]] = 1 # earliest unknown
867 base[n[2]] = 1 # latest known
847 base[n[2]] = 1 # latest known
868 continue
848 continue
869
849
870 for a in n[2:4]:
850 for a in n[2:4]:
871 if a not in rep:
851 if a not in rep:
872 r.append(a)
852 r.append(a)
873 rep[a] = 1
853 rep[a] = 1
874
854
875 seen[n[0]] = 1
855 seen[n[0]] = 1
876
856
877 if r:
857 if r:
878 reqcnt += 1
858 reqcnt += 1
879 self.ui.debug(_("request %d: %s\n") %
859 self.ui.debug(_("request %d: %s\n") %
880 (reqcnt, " ".join(map(short, r))))
860 (reqcnt, " ".join(map(short, r))))
881 for p in range(0, len(r), 10):
861 for p in range(0, len(r), 10):
882 for b in remote.branches(r[p:p+10]):
862 for b in remote.branches(r[p:p+10]):
883 self.ui.debug(_("received %s:%s\n") %
863 self.ui.debug(_("received %s:%s\n") %
884 (short(b[0]), short(b[1])))
864 (short(b[0]), short(b[1])))
885 if b[0] in m:
865 if b[0] in m:
886 self.ui.debug(_("found base node %s\n")
866 self.ui.debug(_("found base node %s\n")
887 % short(b[0]))
867 % short(b[0]))
888 base[b[0]] = 1
868 base[b[0]] = 1
889 elif b[0] not in seen:
869 elif b[0] not in seen:
890 unknown.append(b)
870 unknown.append(b)
891
871
892 # do binary search on the branches we found
872 # do binary search on the branches we found
893 while search:
873 while search:
894 n = search.pop(0)
874 n = search.pop(0)
895 reqcnt += 1
875 reqcnt += 1
896 l = remote.between([(n[0], n[1])])[0]
876 l = remote.between([(n[0], n[1])])[0]
897 l.append(n[1])
877 l.append(n[1])
898 p = n[0]
878 p = n[0]
899 f = 1
879 f = 1
900 for i in l:
880 for i in l:
901 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
881 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
902 if i in m:
882 if i in m:
903 if f <= 2:
883 if f <= 2:
904 self.ui.debug(_("found new branch changeset %s\n") %
884 self.ui.debug(_("found new branch changeset %s\n") %
905 short(p))
885 short(p))
906 fetch[p] = 1
886 fetch[p] = 1
907 base[i] = 1
887 base[i] = 1
908 else:
888 else:
909 self.ui.debug(_("narrowed branch search to %s:%s\n")
889 self.ui.debug(_("narrowed branch search to %s:%s\n")
910 % (short(p), short(i)))
890 % (short(p), short(i)))
911 search.append((p, i))
891 search.append((p, i))
912 break
892 break
913 p, f = i, f * 2
893 p, f = i, f * 2
914
894
915 # sanity check our fetch list
895 # sanity check our fetch list
916 for f in fetch.keys():
896 for f in fetch.keys():
917 if f in m:
897 if f in m:
918 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
898 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
919
899
920 if base.keys() == [nullid]:
900 if base.keys() == [nullid]:
921 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
901 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
922
902
923 self.ui.note(_("found new changesets starting at ") +
903 self.ui.note(_("found new changesets starting at ") +
924 " ".join([short(f) for f in fetch]) + "\n")
904 " ".join([short(f) for f in fetch]) + "\n")
925
905
926 self.ui.debug(_("%d total queries\n") % reqcnt)
906 self.ui.debug(_("%d total queries\n") % reqcnt)
927
907
928 return fetch.keys()
908 return fetch.keys()
929
909
930 def findoutgoing(self, remote, base=None, heads=None):
910 def findoutgoing(self, remote, base=None, heads=None):
931 if base == None:
911 if base == None:
932 base = {}
912 base = {}
933 self.findincoming(remote, base, heads)
913 self.findincoming(remote, base, heads)
934
914
935 self.ui.debug(_("common changesets up to ")
915 self.ui.debug(_("common changesets up to ")
936 + " ".join(map(short, base.keys())) + "\n")
916 + " ".join(map(short, base.keys())) + "\n")
937
917
938 remain = dict.fromkeys(self.changelog.nodemap)
918 remain = dict.fromkeys(self.changelog.nodemap)
939
919
940 # prune everything remote has from the tree
920 # prune everything remote has from the tree
941 del remain[nullid]
921 del remain[nullid]
942 remove = base.keys()
922 remove = base.keys()
943 while remove:
923 while remove:
944 n = remove.pop(0)
924 n = remove.pop(0)
945 if n in remain:
925 if n in remain:
946 del remain[n]
926 del remain[n]
947 for p in self.changelog.parents(n):
927 for p in self.changelog.parents(n):
948 remove.append(p)
928 remove.append(p)
949
929
950 # find every node whose parents have been pruned
930 # find every node whose parents have been pruned
951 subset = []
931 subset = []
952 for n in remain:
932 for n in remain:
953 p1, p2 = self.changelog.parents(n)
933 p1, p2 = self.changelog.parents(n)
954 if p1 not in remain and p2 not in remain:
934 if p1 not in remain and p2 not in remain:
955 subset.append(n)
935 subset.append(n)
956
936
957 # this is the set of all roots we have to push
937 # this is the set of all roots we have to push
958 return subset
938 return subset
959
939
960 def pull(self, remote, heads=None):
940 def pull(self, remote, heads=None):
961 l = self.lock()
941 l = self.lock()
962
942
963 # if we have an empty repo, fetch everything
943 # if we have an empty repo, fetch everything
964 if self.changelog.tip() == nullid:
944 if self.changelog.tip() == nullid:
965 self.ui.status(_("requesting all changes\n"))
945 self.ui.status(_("requesting all changes\n"))
966 fetch = [nullid]
946 fetch = [nullid]
967 else:
947 else:
968 fetch = self.findincoming(remote)
948 fetch = self.findincoming(remote)
969
949
970 if not fetch:
950 if not fetch:
971 self.ui.status(_("no changes found\n"))
951 self.ui.status(_("no changes found\n"))
972 return 1
952 return 1
973
953
974 if heads is None:
954 if heads is None:
975 cg = remote.changegroup(fetch, 'pull')
955 cg = remote.changegroup(fetch, 'pull')
976 else:
956 else:
977 cg = remote.changegroupsubset(fetch, heads, 'pull')
957 cg = remote.changegroupsubset(fetch, heads, 'pull')
978 return self.addchangegroup(cg)
958 return self.addchangegroup(cg)
979
959
980 def push(self, remote, force=False, revs=None):
960 def push(self, remote, force=False, revs=None):
981 lock = remote.lock()
961 lock = remote.lock()
982
962
983 base = {}
963 base = {}
984 heads = remote.heads()
964 heads = remote.heads()
985 inc = self.findincoming(remote, base, heads)
965 inc = self.findincoming(remote, base, heads)
986 if not force and inc:
966 if not force and inc:
987 self.ui.warn(_("abort: unsynced remote changes!\n"))
967 self.ui.warn(_("abort: unsynced remote changes!\n"))
988 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
968 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
989 return 1
969 return 1
990
970
991 update = self.findoutgoing(remote, base)
971 update = self.findoutgoing(remote, base)
992 if revs is not None:
972 if revs is not None:
993 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
973 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
994 else:
974 else:
995 bases, heads = update, self.changelog.heads()
975 bases, heads = update, self.changelog.heads()
996
976
997 if not bases:
977 if not bases:
998 self.ui.status(_("no changes found\n"))
978 self.ui.status(_("no changes found\n"))
999 return 1
979 return 1
1000 elif not force:
980 elif not force:
1001 if len(bases) < len(heads):
981 if len(bases) < len(heads):
1002 self.ui.warn(_("abort: push creates new remote branches!\n"))
982 self.ui.warn(_("abort: push creates new remote branches!\n"))
1003 self.ui.status(_("(did you forget to merge?"
983 self.ui.status(_("(did you forget to merge?"
1004 " use push -f to force)\n"))
984 " use push -f to force)\n"))
1005 return 1
985 return 1
1006
986
1007 if revs is None:
987 if revs is None:
1008 cg = self.changegroup(update, 'push')
988 cg = self.changegroup(update, 'push')
1009 else:
989 else:
1010 cg = self.changegroupsubset(update, revs, 'push')
990 cg = self.changegroupsubset(update, revs, 'push')
1011 return remote.addchangegroup(cg)
991 return remote.addchangegroup(cg)
1012
992
1013 def changegroupsubset(self, bases, heads, source):
993 def changegroupsubset(self, bases, heads, source):
1014 """This function generates a changegroup consisting of all the nodes
994 """This function generates a changegroup consisting of all the nodes
1015 that are descendents of any of the bases, and ancestors of any of
995 that are descendents of any of the bases, and ancestors of any of
1016 the heads.
996 the heads.
1017
997
1018 It is fairly complex as determining which filenodes and which
998 It is fairly complex as determining which filenodes and which
1019 manifest nodes need to be included for the changeset to be complete
999 manifest nodes need to be included for the changeset to be complete
1020 is non-trivial.
1000 is non-trivial.
1021
1001
1022 Another wrinkle is doing the reverse, figuring out which changeset in
1002 Another wrinkle is doing the reverse, figuring out which changeset in
1023 the changegroup a particular filenode or manifestnode belongs to."""
1003 the changegroup a particular filenode or manifestnode belongs to."""
1024
1004
1025 self.hook('preoutgoing', throw=True, source=source)
1005 self.hook('preoutgoing', throw=True, source=source)
1026
1006
1027 # Set up some initial variables
1007 # Set up some initial variables
1028 # Make it easy to refer to self.changelog
1008 # Make it easy to refer to self.changelog
1029 cl = self.changelog
1009 cl = self.changelog
1030 # msng is short for missing - compute the list of changesets in this
1010 # msng is short for missing - compute the list of changesets in this
1031 # changegroup.
1011 # changegroup.
1032 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1012 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1033 # Some bases may turn out to be superfluous, and some heads may be
1013 # Some bases may turn out to be superfluous, and some heads may be
1034 # too. nodesbetween will return the minimal set of bases and heads
1014 # too. nodesbetween will return the minimal set of bases and heads
1035 # necessary to re-create the changegroup.
1015 # necessary to re-create the changegroup.
1036
1016
1037 # Known heads are the list of heads that it is assumed the recipient
1017 # Known heads are the list of heads that it is assumed the recipient
1038 # of this changegroup will know about.
1018 # of this changegroup will know about.
1039 knownheads = {}
1019 knownheads = {}
1040 # We assume that all parents of bases are known heads.
1020 # We assume that all parents of bases are known heads.
1041 for n in bases:
1021 for n in bases:
1042 for p in cl.parents(n):
1022 for p in cl.parents(n):
1043 if p != nullid:
1023 if p != nullid:
1044 knownheads[p] = 1
1024 knownheads[p] = 1
1045 knownheads = knownheads.keys()
1025 knownheads = knownheads.keys()
1046 if knownheads:
1026 if knownheads:
1047 # Now that we know what heads are known, we can compute which
1027 # Now that we know what heads are known, we can compute which
1048 # changesets are known. The recipient must know about all
1028 # changesets are known. The recipient must know about all
1049 # changesets required to reach the known heads from the null
1029 # changesets required to reach the known heads from the null
1050 # changeset.
1030 # changeset.
1051 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1031 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1052 junk = None
1032 junk = None
1053 # Transform the list into an ersatz set.
1033 # Transform the list into an ersatz set.
1054 has_cl_set = dict.fromkeys(has_cl_set)
1034 has_cl_set = dict.fromkeys(has_cl_set)
1055 else:
1035 else:
1056 # If there were no known heads, the recipient cannot be assumed to
1036 # If there were no known heads, the recipient cannot be assumed to
1057 # know about any changesets.
1037 # know about any changesets.
1058 has_cl_set = {}
1038 has_cl_set = {}
1059
1039
1060 # Make it easy to refer to self.manifest
1040 # Make it easy to refer to self.manifest
1061 mnfst = self.manifest
1041 mnfst = self.manifest
1062 # We don't know which manifests are missing yet
1042 # We don't know which manifests are missing yet
1063 msng_mnfst_set = {}
1043 msng_mnfst_set = {}
1064 # Nor do we know which filenodes are missing.
1044 # Nor do we know which filenodes are missing.
1065 msng_filenode_set = {}
1045 msng_filenode_set = {}
1066
1046
1067 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1047 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1068 junk = None
1048 junk = None
1069
1049
1070 # A changeset always belongs to itself, so the changenode lookup
1050 # A changeset always belongs to itself, so the changenode lookup
1071 # function for a changenode is identity.
1051 # function for a changenode is identity.
1072 def identity(x):
1052 def identity(x):
1073 return x
1053 return x
1074
1054
1075 # A function generating function. Sets up an environment for the
1055 # A function generating function. Sets up an environment for the
1076 # inner function.
1056 # inner function.
1077 def cmp_by_rev_func(revlog):
1057 def cmp_by_rev_func(revlog):
1078 # Compare two nodes by their revision number in the environment's
1058 # Compare two nodes by their revision number in the environment's
1079 # revision history. Since the revision number both represents the
1059 # revision history. Since the revision number both represents the
1080 # most efficient order to read the nodes in, and represents a
1060 # most efficient order to read the nodes in, and represents a
1081 # topological sorting of the nodes, this function is often useful.
1061 # topological sorting of the nodes, this function is often useful.
1082 def cmp_by_rev(a, b):
1062 def cmp_by_rev(a, b):
1083 return cmp(revlog.rev(a), revlog.rev(b))
1063 return cmp(revlog.rev(a), revlog.rev(b))
1084 return cmp_by_rev
1064 return cmp_by_rev
1085
1065
1086 # If we determine that a particular file or manifest node must be a
1066 # If we determine that a particular file or manifest node must be a
1087 # node that the recipient of the changegroup will already have, we can
1067 # node that the recipient of the changegroup will already have, we can
1088 # also assume the recipient will have all the parents. This function
1068 # also assume the recipient will have all the parents. This function
1089 # prunes them from the set of missing nodes.
1069 # prunes them from the set of missing nodes.
1090 def prune_parents(revlog, hasset, msngset):
1070 def prune_parents(revlog, hasset, msngset):
1091 haslst = hasset.keys()
1071 haslst = hasset.keys()
1092 haslst.sort(cmp_by_rev_func(revlog))
1072 haslst.sort(cmp_by_rev_func(revlog))
1093 for node in haslst:
1073 for node in haslst:
1094 parentlst = [p for p in revlog.parents(node) if p != nullid]
1074 parentlst = [p for p in revlog.parents(node) if p != nullid]
1095 while parentlst:
1075 while parentlst:
1096 n = parentlst.pop()
1076 n = parentlst.pop()
1097 if n not in hasset:
1077 if n not in hasset:
1098 hasset[n] = 1
1078 hasset[n] = 1
1099 p = [p for p in revlog.parents(n) if p != nullid]
1079 p = [p for p in revlog.parents(n) if p != nullid]
1100 parentlst.extend(p)
1080 parentlst.extend(p)
1101 for n in hasset:
1081 for n in hasset:
1102 msngset.pop(n, None)
1082 msngset.pop(n, None)
1103
1083
1104 # This is a function generating function used to set up an environment
1084 # This is a function generating function used to set up an environment
1105 # for the inner function to execute in.
1085 # for the inner function to execute in.
1106 def manifest_and_file_collector(changedfileset):
1086 def manifest_and_file_collector(changedfileset):
1107 # This is an information gathering function that gathers
1087 # This is an information gathering function that gathers
1108 # information from each changeset node that goes out as part of
1088 # information from each changeset node that goes out as part of
1109 # the changegroup. The information gathered is a list of which
1089 # the changegroup. The information gathered is a list of which
1110 # manifest nodes are potentially required (the recipient may
1090 # manifest nodes are potentially required (the recipient may
1111 # already have them) and total list of all files which were
1091 # already have them) and total list of all files which were
1112 # changed in any changeset in the changegroup.
1092 # changed in any changeset in the changegroup.
1113 #
1093 #
1114 # We also remember the first changenode we saw any manifest
1094 # We also remember the first changenode we saw any manifest
1115 # referenced by so we can later determine which changenode 'owns'
1095 # referenced by so we can later determine which changenode 'owns'
1116 # the manifest.
1096 # the manifest.
1117 def collect_manifests_and_files(clnode):
1097 def collect_manifests_and_files(clnode):
1118 c = cl.read(clnode)
1098 c = cl.read(clnode)
1119 for f in c[3]:
1099 for f in c[3]:
1120 # This is to make sure we only have one instance of each
1100 # This is to make sure we only have one instance of each
1121 # filename string for each filename.
1101 # filename string for each filename.
1122 changedfileset.setdefault(f, f)
1102 changedfileset.setdefault(f, f)
1123 msng_mnfst_set.setdefault(c[0], clnode)
1103 msng_mnfst_set.setdefault(c[0], clnode)
1124 return collect_manifests_and_files
1104 return collect_manifests_and_files
1125
1105
1126 # Figure out which manifest nodes (of the ones we think might be part
1106 # Figure out which manifest nodes (of the ones we think might be part
1127 # of the changegroup) the recipient must know about and remove them
1107 # of the changegroup) the recipient must know about and remove them
1128 # from the changegroup.
1108 # from the changegroup.
1129 def prune_manifests():
1109 def prune_manifests():
1130 has_mnfst_set = {}
1110 has_mnfst_set = {}
1131 for n in msng_mnfst_set:
1111 for n in msng_mnfst_set:
1132 # If a 'missing' manifest thinks it belongs to a changenode
1112 # If a 'missing' manifest thinks it belongs to a changenode
1133 # the recipient is assumed to have, obviously the recipient
1113 # the recipient is assumed to have, obviously the recipient
1134 # must have that manifest.
1114 # must have that manifest.
1135 linknode = cl.node(mnfst.linkrev(n))
1115 linknode = cl.node(mnfst.linkrev(n))
1136 if linknode in has_cl_set:
1116 if linknode in has_cl_set:
1137 has_mnfst_set[n] = 1
1117 has_mnfst_set[n] = 1
1138 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1118 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1139
1119
1140 # Use the information collected in collect_manifests_and_files to say
1120 # Use the information collected in collect_manifests_and_files to say
1141 # which changenode any manifestnode belongs to.
1121 # which changenode any manifestnode belongs to.
1142 def lookup_manifest_link(mnfstnode):
1122 def lookup_manifest_link(mnfstnode):
1143 return msng_mnfst_set[mnfstnode]
1123 return msng_mnfst_set[mnfstnode]
1144
1124
1145 # A function generating function that sets up the initial environment
1125 # A function generating function that sets up the initial environment
1146 # the inner function.
1126 # the inner function.
1147 def filenode_collector(changedfiles):
1127 def filenode_collector(changedfiles):
1148 next_rev = [0]
1128 next_rev = [0]
1149 # This gathers information from each manifestnode included in the
1129 # This gathers information from each manifestnode included in the
1150 # changegroup about which filenodes the manifest node references
1130 # changegroup about which filenodes the manifest node references
1151 # so we can include those in the changegroup too.
1131 # so we can include those in the changegroup too.
1152 #
1132 #
1153 # It also remembers which changenode each filenode belongs to. It
1133 # It also remembers which changenode each filenode belongs to. It
1154 # does this by assuming the a filenode belongs to the changenode
1134 # does this by assuming the a filenode belongs to the changenode
1155 # the first manifest that references it belongs to.
1135 # the first manifest that references it belongs to.
1156 def collect_msng_filenodes(mnfstnode):
1136 def collect_msng_filenodes(mnfstnode):
1157 r = mnfst.rev(mnfstnode)
1137 r = mnfst.rev(mnfstnode)
1158 if r == next_rev[0]:
1138 if r == next_rev[0]:
1159 # If the last rev we looked at was the one just previous,
1139 # If the last rev we looked at was the one just previous,
1160 # we only need to see a diff.
1140 # we only need to see a diff.
1161 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1141 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1162 # For each line in the delta
1142 # For each line in the delta
1163 for dline in delta.splitlines():
1143 for dline in delta.splitlines():
1164 # get the filename and filenode for that line
1144 # get the filename and filenode for that line
1165 f, fnode = dline.split('\0')
1145 f, fnode = dline.split('\0')
1166 fnode = bin(fnode[:40])
1146 fnode = bin(fnode[:40])
1167 f = changedfiles.get(f, None)
1147 f = changedfiles.get(f, None)
1168 # And if the file is in the list of files we care
1148 # And if the file is in the list of files we care
1169 # about.
1149 # about.
1170 if f is not None:
1150 if f is not None:
1171 # Get the changenode this manifest belongs to
1151 # Get the changenode this manifest belongs to
1172 clnode = msng_mnfst_set[mnfstnode]
1152 clnode = msng_mnfst_set[mnfstnode]
1173 # Create the set of filenodes for the file if
1153 # Create the set of filenodes for the file if
1174 # there isn't one already.
1154 # there isn't one already.
1175 ndset = msng_filenode_set.setdefault(f, {})
1155 ndset = msng_filenode_set.setdefault(f, {})
1176 # And set the filenode's changelog node to the
1156 # And set the filenode's changelog node to the
1177 # manifest's if it hasn't been set already.
1157 # manifest's if it hasn't been set already.
1178 ndset.setdefault(fnode, clnode)
1158 ndset.setdefault(fnode, clnode)
1179 else:
1159 else:
1180 # Otherwise we need a full manifest.
1160 # Otherwise we need a full manifest.
1181 m = mnfst.read(mnfstnode)
1161 m = mnfst.read(mnfstnode)
1182 # For every file in we care about.
1162 # For every file in we care about.
1183 for f in changedfiles:
1163 for f in changedfiles:
1184 fnode = m.get(f, None)
1164 fnode = m.get(f, None)
1185 # If it's in the manifest
1165 # If it's in the manifest
1186 if fnode is not None:
1166 if fnode is not None:
1187 # See comments above.
1167 # See comments above.
1188 clnode = msng_mnfst_set[mnfstnode]
1168 clnode = msng_mnfst_set[mnfstnode]
1189 ndset = msng_filenode_set.setdefault(f, {})
1169 ndset = msng_filenode_set.setdefault(f, {})
1190 ndset.setdefault(fnode, clnode)
1170 ndset.setdefault(fnode, clnode)
1191 # Remember the revision we hope to see next.
1171 # Remember the revision we hope to see next.
1192 next_rev[0] = r + 1
1172 next_rev[0] = r + 1
1193 return collect_msng_filenodes
1173 return collect_msng_filenodes
1194
1174
1195 # We have a list of filenodes we think we need for a file, lets remove
1175 # We have a list of filenodes we think we need for a file, lets remove
1196 # all those we now the recipient must have.
1176 # all those we now the recipient must have.
1197 def prune_filenodes(f, filerevlog):
1177 def prune_filenodes(f, filerevlog):
1198 msngset = msng_filenode_set[f]
1178 msngset = msng_filenode_set[f]
1199 hasset = {}
1179 hasset = {}
1200 # If a 'missing' filenode thinks it belongs to a changenode we
1180 # If a 'missing' filenode thinks it belongs to a changenode we
1201 # assume the recipient must have, then the recipient must have
1181 # assume the recipient must have, then the recipient must have
1202 # that filenode.
1182 # that filenode.
1203 for n in msngset:
1183 for n in msngset:
1204 clnode = cl.node(filerevlog.linkrev(n))
1184 clnode = cl.node(filerevlog.linkrev(n))
1205 if clnode in has_cl_set:
1185 if clnode in has_cl_set:
1206 hasset[n] = 1
1186 hasset[n] = 1
1207 prune_parents(filerevlog, hasset, msngset)
1187 prune_parents(filerevlog, hasset, msngset)
1208
1188
1209 # A function generator function that sets up the a context for the
1189 # A function generator function that sets up the a context for the
1210 # inner function.
1190 # inner function.
1211 def lookup_filenode_link_func(fname):
1191 def lookup_filenode_link_func(fname):
1212 msngset = msng_filenode_set[fname]
1192 msngset = msng_filenode_set[fname]
1213 # Lookup the changenode the filenode belongs to.
1193 # Lookup the changenode the filenode belongs to.
1214 def lookup_filenode_link(fnode):
1194 def lookup_filenode_link(fnode):
1215 return msngset[fnode]
1195 return msngset[fnode]
1216 return lookup_filenode_link
1196 return lookup_filenode_link
1217
1197
1218 # Now that we have all theses utility functions to help out and
1198 # Now that we have all theses utility functions to help out and
1219 # logically divide up the task, generate the group.
1199 # logically divide up the task, generate the group.
1220 def gengroup():
1200 def gengroup():
1221 # The set of changed files starts empty.
1201 # The set of changed files starts empty.
1222 changedfiles = {}
1202 changedfiles = {}
1223 # Create a changenode group generator that will call our functions
1203 # Create a changenode group generator that will call our functions
1224 # back to lookup the owning changenode and collect information.
1204 # back to lookup the owning changenode and collect information.
1225 group = cl.group(msng_cl_lst, identity,
1205 group = cl.group(msng_cl_lst, identity,
1226 manifest_and_file_collector(changedfiles))
1206 manifest_and_file_collector(changedfiles))
1227 for chnk in group:
1207 for chnk in group:
1228 yield chnk
1208 yield chnk
1229
1209
1230 # The list of manifests has been collected by the generator
1210 # The list of manifests has been collected by the generator
1231 # calling our functions back.
1211 # calling our functions back.
1232 prune_manifests()
1212 prune_manifests()
1233 msng_mnfst_lst = msng_mnfst_set.keys()
1213 msng_mnfst_lst = msng_mnfst_set.keys()
1234 # Sort the manifestnodes by revision number.
1214 # Sort the manifestnodes by revision number.
1235 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1215 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1236 # Create a generator for the manifestnodes that calls our lookup
1216 # Create a generator for the manifestnodes that calls our lookup
1237 # and data collection functions back.
1217 # and data collection functions back.
1238 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1218 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1239 filenode_collector(changedfiles))
1219 filenode_collector(changedfiles))
1240 for chnk in group:
1220 for chnk in group:
1241 yield chnk
1221 yield chnk
1242
1222
1243 # These are no longer needed, dereference and toss the memory for
1223 # These are no longer needed, dereference and toss the memory for
1244 # them.
1224 # them.
1245 msng_mnfst_lst = None
1225 msng_mnfst_lst = None
1246 msng_mnfst_set.clear()
1226 msng_mnfst_set.clear()
1247
1227
1248 changedfiles = changedfiles.keys()
1228 changedfiles = changedfiles.keys()
1249 changedfiles.sort()
1229 changedfiles.sort()
1250 # Go through all our files in order sorted by name.
1230 # Go through all our files in order sorted by name.
1251 for fname in changedfiles:
1231 for fname in changedfiles:
1252 filerevlog = self.file(fname)
1232 filerevlog = self.file(fname)
1253 # Toss out the filenodes that the recipient isn't really
1233 # Toss out the filenodes that the recipient isn't really
1254 # missing.
1234 # missing.
1255 if msng_filenode_set.has_key(fname):
1235 if msng_filenode_set.has_key(fname):
1256 prune_filenodes(fname, filerevlog)
1236 prune_filenodes(fname, filerevlog)
1257 msng_filenode_lst = msng_filenode_set[fname].keys()
1237 msng_filenode_lst = msng_filenode_set[fname].keys()
1258 else:
1238 else:
1259 msng_filenode_lst = []
1239 msng_filenode_lst = []
1260 # If any filenodes are left, generate the group for them,
1240 # If any filenodes are left, generate the group for them,
1261 # otherwise don't bother.
1241 # otherwise don't bother.
1262 if len(msng_filenode_lst) > 0:
1242 if len(msng_filenode_lst) > 0:
1263 yield struct.pack(">l", len(fname) + 4) + fname
1243 yield struct.pack(">l", len(fname) + 4) + fname
1264 # Sort the filenodes by their revision #
1244 # Sort the filenodes by their revision #
1265 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1245 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1266 # Create a group generator and only pass in a changenode
1246 # Create a group generator and only pass in a changenode
1267 # lookup function as we need to collect no information
1247 # lookup function as we need to collect no information
1268 # from filenodes.
1248 # from filenodes.
1269 group = filerevlog.group(msng_filenode_lst,
1249 group = filerevlog.group(msng_filenode_lst,
1270 lookup_filenode_link_func(fname))
1250 lookup_filenode_link_func(fname))
1271 for chnk in group:
1251 for chnk in group:
1272 yield chnk
1252 yield chnk
1273 if msng_filenode_set.has_key(fname):
1253 if msng_filenode_set.has_key(fname):
1274 # Don't need this anymore, toss it to free memory.
1254 # Don't need this anymore, toss it to free memory.
1275 del msng_filenode_set[fname]
1255 del msng_filenode_set[fname]
1276 # Signal that no more groups are left.
1256 # Signal that no more groups are left.
1277 yield struct.pack(">l", 0)
1257 yield struct.pack(">l", 0)
1278
1258
1279 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1259 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1280
1260
1281 return util.chunkbuffer(gengroup())
1261 return util.chunkbuffer(gengroup())
1282
1262
1283 def changegroup(self, basenodes, source):
1263 def changegroup(self, basenodes, source):
1284 """Generate a changegroup of all nodes that we have that a recipient
1264 """Generate a changegroup of all nodes that we have that a recipient
1285 doesn't.
1265 doesn't.
1286
1266
1287 This is much easier than the previous function as we can assume that
1267 This is much easier than the previous function as we can assume that
1288 the recipient has any changenode we aren't sending them."""
1268 the recipient has any changenode we aren't sending them."""
1289
1269
1290 self.hook('preoutgoing', throw=True, source=source)
1270 self.hook('preoutgoing', throw=True, source=source)
1291
1271
1292 cl = self.changelog
1272 cl = self.changelog
1293 nodes = cl.nodesbetween(basenodes, None)[0]
1273 nodes = cl.nodesbetween(basenodes, None)[0]
1294 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1274 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1295
1275
1296 def identity(x):
1276 def identity(x):
1297 return x
1277 return x
1298
1278
1299 def gennodelst(revlog):
1279 def gennodelst(revlog):
1300 for r in xrange(0, revlog.count()):
1280 for r in xrange(0, revlog.count()):
1301 n = revlog.node(r)
1281 n = revlog.node(r)
1302 if revlog.linkrev(n) in revset:
1282 if revlog.linkrev(n) in revset:
1303 yield n
1283 yield n
1304
1284
1305 def changed_file_collector(changedfileset):
1285 def changed_file_collector(changedfileset):
1306 def collect_changed_files(clnode):
1286 def collect_changed_files(clnode):
1307 c = cl.read(clnode)
1287 c = cl.read(clnode)
1308 for fname in c[3]:
1288 for fname in c[3]:
1309 changedfileset[fname] = 1
1289 changedfileset[fname] = 1
1310 return collect_changed_files
1290 return collect_changed_files
1311
1291
1312 def lookuprevlink_func(revlog):
1292 def lookuprevlink_func(revlog):
1313 def lookuprevlink(n):
1293 def lookuprevlink(n):
1314 return cl.node(revlog.linkrev(n))
1294 return cl.node(revlog.linkrev(n))
1315 return lookuprevlink
1295 return lookuprevlink
1316
1296
1317 def gengroup():
1297 def gengroup():
1318 # construct a list of all changed files
1298 # construct a list of all changed files
1319 changedfiles = {}
1299 changedfiles = {}
1320
1300
1321 for chnk in cl.group(nodes, identity,
1301 for chnk in cl.group(nodes, identity,
1322 changed_file_collector(changedfiles)):
1302 changed_file_collector(changedfiles)):
1323 yield chnk
1303 yield chnk
1324 changedfiles = changedfiles.keys()
1304 changedfiles = changedfiles.keys()
1325 changedfiles.sort()
1305 changedfiles.sort()
1326
1306
1327 mnfst = self.manifest
1307 mnfst = self.manifest
1328 nodeiter = gennodelst(mnfst)
1308 nodeiter = gennodelst(mnfst)
1329 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1309 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1330 yield chnk
1310 yield chnk
1331
1311
1332 for fname in changedfiles:
1312 for fname in changedfiles:
1333 filerevlog = self.file(fname)
1313 filerevlog = self.file(fname)
1334 nodeiter = gennodelst(filerevlog)
1314 nodeiter = gennodelst(filerevlog)
1335 nodeiter = list(nodeiter)
1315 nodeiter = list(nodeiter)
1336 if nodeiter:
1316 if nodeiter:
1337 yield struct.pack(">l", len(fname) + 4) + fname
1317 yield struct.pack(">l", len(fname) + 4) + fname
1338 lookup = lookuprevlink_func(filerevlog)
1318 lookup = lookuprevlink_func(filerevlog)
1339 for chnk in filerevlog.group(nodeiter, lookup):
1319 for chnk in filerevlog.group(nodeiter, lookup):
1340 yield chnk
1320 yield chnk
1341
1321
1342 yield struct.pack(">l", 0)
1322 yield struct.pack(">l", 0)
1343 self.hook('outgoing', node=hex(nodes[0]), source=source)
1323 self.hook('outgoing', node=hex(nodes[0]), source=source)
1344
1324
1345 return util.chunkbuffer(gengroup())
1325 return util.chunkbuffer(gengroup())
1346
1326
1347 def addchangegroup(self, source):
1327 def addchangegroup(self, source):
1348
1328
1349 def getchunk():
1329 def getchunk():
1350 d = source.read(4)
1330 d = source.read(4)
1351 if not d:
1331 if not d:
1352 return ""
1332 return ""
1353 l = struct.unpack(">l", d)[0]
1333 l = struct.unpack(">l", d)[0]
1354 if l <= 4:
1334 if l <= 4:
1355 return ""
1335 return ""
1356 d = source.read(l - 4)
1336 d = source.read(l - 4)
1357 if len(d) < l - 4:
1337 if len(d) < l - 4:
1358 raise repo.RepoError(_("premature EOF reading chunk"
1338 raise repo.RepoError(_("premature EOF reading chunk"
1359 " (got %d bytes, expected %d)")
1339 " (got %d bytes, expected %d)")
1360 % (len(d), l - 4))
1340 % (len(d), l - 4))
1361 return d
1341 return d
1362
1342
1363 def getgroup():
1343 def getgroup():
1364 while 1:
1344 while 1:
1365 c = getchunk()
1345 c = getchunk()
1366 if not c:
1346 if not c:
1367 break
1347 break
1368 yield c
1348 yield c
1369
1349
1370 def csmap(x):
1350 def csmap(x):
1371 self.ui.debug(_("add changeset %s\n") % short(x))
1351 self.ui.debug(_("add changeset %s\n") % short(x))
1372 return self.changelog.count()
1352 return self.changelog.count()
1373
1353
1374 def revmap(x):
1354 def revmap(x):
1375 return self.changelog.rev(x)
1355 return self.changelog.rev(x)
1376
1356
1377 if not source:
1357 if not source:
1378 return
1358 return
1379
1359
1380 self.hook('prechangegroup', throw=True)
1360 self.hook('prechangegroup', throw=True)
1381
1361
1382 changesets = files = revisions = 0
1362 changesets = files = revisions = 0
1383
1363
1384 tr = self.transaction()
1364 tr = self.transaction()
1385
1365
1386 oldheads = len(self.changelog.heads())
1366 oldheads = len(self.changelog.heads())
1387
1367
1388 # pull off the changeset group
1368 # pull off the changeset group
1389 self.ui.status(_("adding changesets\n"))
1369 self.ui.status(_("adding changesets\n"))
1390 co = self.changelog.tip()
1370 co = self.changelog.tip()
1391 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1371 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1392 cnr, cor = map(self.changelog.rev, (cn, co))
1372 cnr, cor = map(self.changelog.rev, (cn, co))
1393 if cn == nullid:
1373 if cn == nullid:
1394 cnr = cor
1374 cnr = cor
1395 changesets = cnr - cor
1375 changesets = cnr - cor
1396
1376
1397 # pull off the manifest group
1377 # pull off the manifest group
1398 self.ui.status(_("adding manifests\n"))
1378 self.ui.status(_("adding manifests\n"))
1399 mm = self.manifest.tip()
1379 mm = self.manifest.tip()
1400 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1380 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1401
1381
1402 # process the files
1382 # process the files
1403 self.ui.status(_("adding file changes\n"))
1383 self.ui.status(_("adding file changes\n"))
1404 while 1:
1384 while 1:
1405 f = getchunk()
1385 f = getchunk()
1406 if not f:
1386 if not f:
1407 break
1387 break
1408 self.ui.debug(_("adding %s revisions\n") % f)
1388 self.ui.debug(_("adding %s revisions\n") % f)
1409 fl = self.file(f)
1389 fl = self.file(f)
1410 o = fl.count()
1390 o = fl.count()
1411 n = fl.addgroup(getgroup(), revmap, tr)
1391 n = fl.addgroup(getgroup(), revmap, tr)
1412 revisions += fl.count() - o
1392 revisions += fl.count() - o
1413 files += 1
1393 files += 1
1414
1394
1415 newheads = len(self.changelog.heads())
1395 newheads = len(self.changelog.heads())
1416 heads = ""
1396 heads = ""
1417 if oldheads and newheads > oldheads:
1397 if oldheads and newheads > oldheads:
1418 heads = _(" (+%d heads)") % (newheads - oldheads)
1398 heads = _(" (+%d heads)") % (newheads - oldheads)
1419
1399
1420 self.ui.status(_("added %d changesets"
1400 self.ui.status(_("added %d changesets"
1421 " with %d changes to %d files%s\n")
1401 " with %d changes to %d files%s\n")
1422 % (changesets, revisions, files, heads))
1402 % (changesets, revisions, files, heads))
1423
1403
1424 self.hook('pretxnchangegroup', throw=True,
1404 self.hook('pretxnchangegroup', throw=True,
1425 node=hex(self.changelog.node(cor+1)))
1405 node=hex(self.changelog.node(cor+1)))
1426
1406
1427 tr.close()
1407 tr.close()
1428
1408
1429 if changesets > 0:
1409 if changesets > 0:
1430 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1410 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1431
1411
1432 for i in range(cor + 1, cnr + 1):
1412 for i in range(cor + 1, cnr + 1):
1433 self.hook("incoming", node=hex(self.changelog.node(i)))
1413 self.hook("incoming", node=hex(self.changelog.node(i)))
1434
1414
1435 def update(self, node, allow=False, force=False, choose=None,
1415 def update(self, node, allow=False, force=False, choose=None,
1436 moddirstate=True, forcemerge=False, wlock=None):
1416 moddirstate=True, forcemerge=False, wlock=None):
1437 pl = self.dirstate.parents()
1417 pl = self.dirstate.parents()
1438 if not force and pl[1] != nullid:
1418 if not force and pl[1] != nullid:
1439 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1419 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1440 return 1
1420 return 1
1441
1421
1442 err = False
1422 err = False
1443
1423
1444 p1, p2 = pl[0], node
1424 p1, p2 = pl[0], node
1445 pa = self.changelog.ancestor(p1, p2)
1425 pa = self.changelog.ancestor(p1, p2)
1446 m1n = self.changelog.read(p1)[0]
1426 m1n = self.changelog.read(p1)[0]
1447 m2n = self.changelog.read(p2)[0]
1427 m2n = self.changelog.read(p2)[0]
1448 man = self.manifest.ancestor(m1n, m2n)
1428 man = self.manifest.ancestor(m1n, m2n)
1449 m1 = self.manifest.read(m1n)
1429 m1 = self.manifest.read(m1n)
1450 mf1 = self.manifest.readflags(m1n)
1430 mf1 = self.manifest.readflags(m1n)
1451 m2 = self.manifest.read(m2n).copy()
1431 m2 = self.manifest.read(m2n).copy()
1452 mf2 = self.manifest.readflags(m2n)
1432 mf2 = self.manifest.readflags(m2n)
1453 ma = self.manifest.read(man)
1433 ma = self.manifest.read(man)
1454 mfa = self.manifest.readflags(man)
1434 mfa = self.manifest.readflags(man)
1455
1435
1456 modified, added, removed, deleted, unknown = self.changes()
1436 modified, added, removed, deleted, unknown = self.changes()
1457
1437
1458 # is this a jump, or a merge? i.e. is there a linear path
1438 # is this a jump, or a merge? i.e. is there a linear path
1459 # from p1 to p2?
1439 # from p1 to p2?
1460 linear_path = (pa == p1 or pa == p2)
1440 linear_path = (pa == p1 or pa == p2)
1461
1441
1462 if allow and linear_path:
1442 if allow and linear_path:
1463 raise util.Abort(_("there is nothing to merge, "
1443 raise util.Abort(_("there is nothing to merge, "
1464 "just use 'hg update'"))
1444 "just use 'hg update'"))
1465 if allow and not forcemerge:
1445 if allow and not forcemerge:
1466 if modified or added or removed:
1446 if modified or added or removed:
1467 raise util.Abort(_("outstanding uncommited changes"))
1447 raise util.Abort(_("outstanding uncommited changes"))
1468 if not forcemerge and not force:
1448 if not forcemerge and not force:
1469 for f in unknown:
1449 for f in unknown:
1470 if f in m2:
1450 if f in m2:
1471 t1 = self.wread(f)
1451 t1 = self.wread(f)
1472 t2 = self.file(f).read(m2[f])
1452 t2 = self.file(f).read(m2[f])
1473 if cmp(t1, t2) != 0:
1453 if cmp(t1, t2) != 0:
1474 raise util.Abort(_("'%s' already exists in the working"
1454 raise util.Abort(_("'%s' already exists in the working"
1475 " dir and differs from remote") % f)
1455 " dir and differs from remote") % f)
1476
1456
1477 # resolve the manifest to determine which files
1457 # resolve the manifest to determine which files
1478 # we care about merging
1458 # we care about merging
1479 self.ui.note(_("resolving manifests\n"))
1459 self.ui.note(_("resolving manifests\n"))
1480 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1460 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1481 (force, allow, moddirstate, linear_path))
1461 (force, allow, moddirstate, linear_path))
1482 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1462 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1483 (short(man), short(m1n), short(m2n)))
1463 (short(man), short(m1n), short(m2n)))
1484
1464
1485 merge = {}
1465 merge = {}
1486 get = {}
1466 get = {}
1487 remove = []
1467 remove = []
1488
1468
1489 # construct a working dir manifest
1469 # construct a working dir manifest
1490 mw = m1.copy()
1470 mw = m1.copy()
1491 mfw = mf1.copy()
1471 mfw = mf1.copy()
1492 umap = dict.fromkeys(unknown)
1472 umap = dict.fromkeys(unknown)
1493
1473
1494 for f in added + modified + unknown:
1474 for f in added + modified + unknown:
1495 mw[f] = ""
1475 mw[f] = ""
1496 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1476 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1497
1477
1498 if moddirstate and not wlock:
1478 if moddirstate and not wlock:
1499 wlock = self.wlock()
1479 wlock = self.wlock()
1500
1480
1501 for f in deleted + removed:
1481 for f in deleted + removed:
1502 if f in mw:
1482 if f in mw:
1503 del mw[f]
1483 del mw[f]
1504
1484
1505 # If we're jumping between revisions (as opposed to merging),
1485 # If we're jumping between revisions (as opposed to merging),
1506 # and if neither the working directory nor the target rev has
1486 # and if neither the working directory nor the target rev has
1507 # the file, then we need to remove it from the dirstate, to
1487 # the file, then we need to remove it from the dirstate, to
1508 # prevent the dirstate from listing the file when it is no
1488 # prevent the dirstate from listing the file when it is no
1509 # longer in the manifest.
1489 # longer in the manifest.
1510 if moddirstate and linear_path and f not in m2:
1490 if moddirstate and linear_path and f not in m2:
1511 self.dirstate.forget((f,))
1491 self.dirstate.forget((f,))
1512
1492
1513 # Compare manifests
1493 # Compare manifests
1514 for f, n in mw.iteritems():
1494 for f, n in mw.iteritems():
1515 if choose and not choose(f):
1495 if choose and not choose(f):
1516 continue
1496 continue
1517 if f in m2:
1497 if f in m2:
1518 s = 0
1498 s = 0
1519
1499
1520 # is the wfile new since m1, and match m2?
1500 # is the wfile new since m1, and match m2?
1521 if f not in m1:
1501 if f not in m1:
1522 t1 = self.wread(f)
1502 t1 = self.wread(f)
1523 t2 = self.file(f).read(m2[f])
1503 t2 = self.file(f).read(m2[f])
1524 if cmp(t1, t2) == 0:
1504 if cmp(t1, t2) == 0:
1525 n = m2[f]
1505 n = m2[f]
1526 del t1, t2
1506 del t1, t2
1527
1507
1528 # are files different?
1508 # are files different?
1529 if n != m2[f]:
1509 if n != m2[f]:
1530 a = ma.get(f, nullid)
1510 a = ma.get(f, nullid)
1531 # are both different from the ancestor?
1511 # are both different from the ancestor?
1532 if n != a and m2[f] != a:
1512 if n != a and m2[f] != a:
1533 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1513 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1534 # merge executable bits
1514 # merge executable bits
1535 # "if we changed or they changed, change in merge"
1515 # "if we changed or they changed, change in merge"
1536 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1516 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1537 mode = ((a^b) | (a^c)) ^ a
1517 mode = ((a^b) | (a^c)) ^ a
1538 merge[f] = (m1.get(f, nullid), m2[f], mode)
1518 merge[f] = (m1.get(f, nullid), m2[f], mode)
1539 s = 1
1519 s = 1
1540 # are we clobbering?
1520 # are we clobbering?
1541 # is remote's version newer?
1521 # is remote's version newer?
1542 # or are we going back in time?
1522 # or are we going back in time?
1543 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1523 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1544 self.ui.debug(_(" remote %s is newer, get\n") % f)
1524 self.ui.debug(_(" remote %s is newer, get\n") % f)
1545 get[f] = m2[f]
1525 get[f] = m2[f]
1546 s = 1
1526 s = 1
1547 elif f in umap:
1527 elif f in umap:
1548 # this unknown file is the same as the checkout
1528 # this unknown file is the same as the checkout
1549 get[f] = m2[f]
1529 get[f] = m2[f]
1550
1530
1551 if not s and mfw[f] != mf2[f]:
1531 if not s and mfw[f] != mf2[f]:
1552 if force:
1532 if force:
1553 self.ui.debug(_(" updating permissions for %s\n") % f)
1533 self.ui.debug(_(" updating permissions for %s\n") % f)
1554 util.set_exec(self.wjoin(f), mf2[f])
1534 util.set_exec(self.wjoin(f), mf2[f])
1555 else:
1535 else:
1556 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1536 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1557 mode = ((a^b) | (a^c)) ^ a
1537 mode = ((a^b) | (a^c)) ^ a
1558 if mode != b:
1538 if mode != b:
1559 self.ui.debug(_(" updating permissions for %s\n")
1539 self.ui.debug(_(" updating permissions for %s\n")
1560 % f)
1540 % f)
1561 util.set_exec(self.wjoin(f), mode)
1541 util.set_exec(self.wjoin(f), mode)
1562 del m2[f]
1542 del m2[f]
1563 elif f in ma:
1543 elif f in ma:
1564 if n != ma[f]:
1544 if n != ma[f]:
1565 r = _("d")
1545 r = _("d")
1566 if not force and (linear_path or allow):
1546 if not force and (linear_path or allow):
1567 r = self.ui.prompt(
1547 r = self.ui.prompt(
1568 (_(" local changed %s which remote deleted\n") % f) +
1548 (_(" local changed %s which remote deleted\n") % f) +
1569 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1549 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1570 if r == _("d"):
1550 if r == _("d"):
1571 remove.append(f)
1551 remove.append(f)
1572 else:
1552 else:
1573 self.ui.debug(_("other deleted %s\n") % f)
1553 self.ui.debug(_("other deleted %s\n") % f)
1574 remove.append(f) # other deleted it
1554 remove.append(f) # other deleted it
1575 else:
1555 else:
1576 # file is created on branch or in working directory
1556 # file is created on branch or in working directory
1577 if force and f not in umap:
1557 if force and f not in umap:
1578 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1558 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1579 remove.append(f)
1559 remove.append(f)
1580 elif n == m1.get(f, nullid): # same as parent
1560 elif n == m1.get(f, nullid): # same as parent
1581 if p2 == pa: # going backwards?
1561 if p2 == pa: # going backwards?
1582 self.ui.debug(_("remote deleted %s\n") % f)
1562 self.ui.debug(_("remote deleted %s\n") % f)
1583 remove.append(f)
1563 remove.append(f)
1584 else:
1564 else:
1585 self.ui.debug(_("local modified %s, keeping\n") % f)
1565 self.ui.debug(_("local modified %s, keeping\n") % f)
1586 else:
1566 else:
1587 self.ui.debug(_("working dir created %s, keeping\n") % f)
1567 self.ui.debug(_("working dir created %s, keeping\n") % f)
1588
1568
1589 for f, n in m2.iteritems():
1569 for f, n in m2.iteritems():
1590 if choose and not choose(f):
1570 if choose and not choose(f):
1591 continue
1571 continue
1592 if f[0] == "/":
1572 if f[0] == "/":
1593 continue
1573 continue
1594 if f in ma and n != ma[f]:
1574 if f in ma and n != ma[f]:
1595 r = _("k")
1575 r = _("k")
1596 if not force and (linear_path or allow):
1576 if not force and (linear_path or allow):
1597 r = self.ui.prompt(
1577 r = self.ui.prompt(
1598 (_("remote changed %s which local deleted\n") % f) +
1578 (_("remote changed %s which local deleted\n") % f) +
1599 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1579 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1600 if r == _("k"):
1580 if r == _("k"):
1601 get[f] = n
1581 get[f] = n
1602 elif f not in ma:
1582 elif f not in ma:
1603 self.ui.debug(_("remote created %s\n") % f)
1583 self.ui.debug(_("remote created %s\n") % f)
1604 get[f] = n
1584 get[f] = n
1605 else:
1585 else:
1606 if force or p2 == pa: # going backwards?
1586 if force or p2 == pa: # going backwards?
1607 self.ui.debug(_("local deleted %s, recreating\n") % f)
1587 self.ui.debug(_("local deleted %s, recreating\n") % f)
1608 get[f] = n
1588 get[f] = n
1609 else:
1589 else:
1610 self.ui.debug(_("local deleted %s\n") % f)
1590 self.ui.debug(_("local deleted %s\n") % f)
1611
1591
1612 del mw, m1, m2, ma
1592 del mw, m1, m2, ma
1613
1593
1614 if force:
1594 if force:
1615 for f in merge:
1595 for f in merge:
1616 get[f] = merge[f][1]
1596 get[f] = merge[f][1]
1617 merge = {}
1597 merge = {}
1618
1598
1619 if linear_path or force:
1599 if linear_path or force:
1620 # we don't need to do any magic, just jump to the new rev
1600 # we don't need to do any magic, just jump to the new rev
1621 branch_merge = False
1601 branch_merge = False
1622 p1, p2 = p2, nullid
1602 p1, p2 = p2, nullid
1623 else:
1603 else:
1624 if not allow:
1604 if not allow:
1625 self.ui.status(_("this update spans a branch"
1605 self.ui.status(_("this update spans a branch"
1626 " affecting the following files:\n"))
1606 " affecting the following files:\n"))
1627 fl = merge.keys() + get.keys()
1607 fl = merge.keys() + get.keys()
1628 fl.sort()
1608 fl.sort()
1629 for f in fl:
1609 for f in fl:
1630 cf = ""
1610 cf = ""
1631 if f in merge:
1611 if f in merge:
1632 cf = _(" (resolve)")
1612 cf = _(" (resolve)")
1633 self.ui.status(" %s%s\n" % (f, cf))
1613 self.ui.status(" %s%s\n" % (f, cf))
1634 self.ui.warn(_("aborting update spanning branches!\n"))
1614 self.ui.warn(_("aborting update spanning branches!\n"))
1635 self.ui.status(_("(use update -m to merge across branches"
1615 self.ui.status(_("(use update -m to merge across branches"
1636 " or -C to lose changes)\n"))
1616 " or -C to lose changes)\n"))
1637 return 1
1617 return 1
1638 branch_merge = True
1618 branch_merge = True
1639
1619
1640 # get the files we don't need to change
1620 # get the files we don't need to change
1641 files = get.keys()
1621 files = get.keys()
1642 files.sort()
1622 files.sort()
1643 for f in files:
1623 for f in files:
1644 if f[0] == "/":
1624 if f[0] == "/":
1645 continue
1625 continue
1646 self.ui.note(_("getting %s\n") % f)
1626 self.ui.note(_("getting %s\n") % f)
1647 t = self.file(f).read(get[f])
1627 t = self.file(f).read(get[f])
1648 self.wwrite(f, t)
1628 self.wwrite(f, t)
1649 util.set_exec(self.wjoin(f), mf2[f])
1629 util.set_exec(self.wjoin(f), mf2[f])
1650 if moddirstate:
1630 if moddirstate:
1651 if branch_merge:
1631 if branch_merge:
1652 self.dirstate.update([f], 'n', st_mtime=-1)
1632 self.dirstate.update([f], 'n', st_mtime=-1)
1653 else:
1633 else:
1654 self.dirstate.update([f], 'n')
1634 self.dirstate.update([f], 'n')
1655
1635
1656 # merge the tricky bits
1636 # merge the tricky bits
1657 files = merge.keys()
1637 files = merge.keys()
1658 files.sort()
1638 files.sort()
1659 for f in files:
1639 for f in files:
1660 self.ui.status(_("merging %s\n") % f)
1640 self.ui.status(_("merging %s\n") % f)
1661 my, other, flag = merge[f]
1641 my, other, flag = merge[f]
1662 ret = self.merge3(f, my, other)
1642 ret = self.merge3(f, my, other)
1663 if ret:
1643 if ret:
1664 err = True
1644 err = True
1665 util.set_exec(self.wjoin(f), flag)
1645 util.set_exec(self.wjoin(f), flag)
1666 if moddirstate:
1646 if moddirstate:
1667 if branch_merge:
1647 if branch_merge:
1668 # We've done a branch merge, mark this file as merged
1648 # We've done a branch merge, mark this file as merged
1669 # so that we properly record the merger later
1649 # so that we properly record the merger later
1670 self.dirstate.update([f], 'm')
1650 self.dirstate.update([f], 'm')
1671 else:
1651 else:
1672 # We've update-merged a locally modified file, so
1652 # We've update-merged a locally modified file, so
1673 # we set the dirstate to emulate a normal checkout
1653 # we set the dirstate to emulate a normal checkout
1674 # of that file some time in the past. Thus our
1654 # of that file some time in the past. Thus our
1675 # merge will appear as a normal local file
1655 # merge will appear as a normal local file
1676 # modification.
1656 # modification.
1677 f_len = len(self.file(f).read(other))
1657 f_len = len(self.file(f).read(other))
1678 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1658 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1679
1659
1680 remove.sort()
1660 remove.sort()
1681 for f in remove:
1661 for f in remove:
1682 self.ui.note(_("removing %s\n") % f)
1662 self.ui.note(_("removing %s\n") % f)
1683 util.audit_path(f)
1663 util.audit_path(f)
1684 try:
1664 try:
1685 util.unlink(self.wjoin(f))
1665 util.unlink(self.wjoin(f))
1686 except OSError, inst:
1666 except OSError, inst:
1687 if inst.errno != errno.ENOENT:
1667 if inst.errno != errno.ENOENT:
1688 self.ui.warn(_("update failed to remove %s: %s!\n") %
1668 self.ui.warn(_("update failed to remove %s: %s!\n") %
1689 (f, inst.strerror))
1669 (f, inst.strerror))
1690 if moddirstate:
1670 if moddirstate:
1691 if branch_merge:
1671 if branch_merge:
1692 self.dirstate.update(remove, 'r')
1672 self.dirstate.update(remove, 'r')
1693 else:
1673 else:
1694 self.dirstate.forget(remove)
1674 self.dirstate.forget(remove)
1695
1675
1696 if moddirstate:
1676 if moddirstate:
1697 self.dirstate.setparents(p1, p2)
1677 self.dirstate.setparents(p1, p2)
1698 return err
1678 return err
1699
1679
1700 def merge3(self, fn, my, other):
1680 def merge3(self, fn, my, other):
1701 """perform a 3-way merge in the working directory"""
1681 """perform a 3-way merge in the working directory"""
1702
1682
1703 def temp(prefix, node):
1683 def temp(prefix, node):
1704 pre = "%s~%s." % (os.path.basename(fn), prefix)
1684 pre = "%s~%s." % (os.path.basename(fn), prefix)
1705 (fd, name) = tempfile.mkstemp("", pre)
1685 (fd, name) = tempfile.mkstemp("", pre)
1706 f = os.fdopen(fd, "wb")
1686 f = os.fdopen(fd, "wb")
1707 self.wwrite(fn, fl.read(node), f)
1687 self.wwrite(fn, fl.read(node), f)
1708 f.close()
1688 f.close()
1709 return name
1689 return name
1710
1690
1711 fl = self.file(fn)
1691 fl = self.file(fn)
1712 base = fl.ancestor(my, other)
1692 base = fl.ancestor(my, other)
1713 a = self.wjoin(fn)
1693 a = self.wjoin(fn)
1714 b = temp("base", base)
1694 b = temp("base", base)
1715 c = temp("other", other)
1695 c = temp("other", other)
1716
1696
1717 self.ui.note(_("resolving %s\n") % fn)
1697 self.ui.note(_("resolving %s\n") % fn)
1718 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1698 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1719 (fn, short(my), short(other), short(base)))
1699 (fn, short(my), short(other), short(base)))
1720
1700
1721 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1701 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1722 or "hgmerge")
1702 or "hgmerge")
1723 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1703 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1724 if r:
1704 if r:
1725 self.ui.warn(_("merging %s failed!\n") % fn)
1705 self.ui.warn(_("merging %s failed!\n") % fn)
1726
1706
1727 os.unlink(b)
1707 os.unlink(b)
1728 os.unlink(c)
1708 os.unlink(c)
1729 return r
1709 return r
1730
1710
1731 def verify(self):
1711 def verify(self):
1732 filelinkrevs = {}
1712 filelinkrevs = {}
1733 filenodes = {}
1713 filenodes = {}
1734 changesets = revisions = files = 0
1714 changesets = revisions = files = 0
1735 errors = [0]
1715 errors = [0]
1736 neededmanifests = {}
1716 neededmanifests = {}
1737
1717
1738 def err(msg):
1718 def err(msg):
1739 self.ui.warn(msg + "\n")
1719 self.ui.warn(msg + "\n")
1740 errors[0] += 1
1720 errors[0] += 1
1741
1721
1742 def checksize(obj, name):
1722 def checksize(obj, name):
1743 d = obj.checksize()
1723 d = obj.checksize()
1744 if d[0]:
1724 if d[0]:
1745 err(_("%s data length off by %d bytes") % (name, d[0]))
1725 err(_("%s data length off by %d bytes") % (name, d[0]))
1746 if d[1]:
1726 if d[1]:
1747 err(_("%s index contains %d extra bytes") % (name, d[1]))
1727 err(_("%s index contains %d extra bytes") % (name, d[1]))
1748
1728
1749 seen = {}
1729 seen = {}
1750 self.ui.status(_("checking changesets\n"))
1730 self.ui.status(_("checking changesets\n"))
1751 checksize(self.changelog, "changelog")
1731 checksize(self.changelog, "changelog")
1752
1732
1753 for i in range(self.changelog.count()):
1733 for i in range(self.changelog.count()):
1754 changesets += 1
1734 changesets += 1
1755 n = self.changelog.node(i)
1735 n = self.changelog.node(i)
1756 l = self.changelog.linkrev(n)
1736 l = self.changelog.linkrev(n)
1757 if l != i:
1737 if l != i:
1758 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1738 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1759 if n in seen:
1739 if n in seen:
1760 err(_("duplicate changeset at revision %d") % i)
1740 err(_("duplicate changeset at revision %d") % i)
1761 seen[n] = 1
1741 seen[n] = 1
1762
1742
1763 for p in self.changelog.parents(n):
1743 for p in self.changelog.parents(n):
1764 if p not in self.changelog.nodemap:
1744 if p not in self.changelog.nodemap:
1765 err(_("changeset %s has unknown parent %s") %
1745 err(_("changeset %s has unknown parent %s") %
1766 (short(n), short(p)))
1746 (short(n), short(p)))
1767 try:
1747 try:
1768 changes = self.changelog.read(n)
1748 changes = self.changelog.read(n)
1769 except KeyboardInterrupt:
1749 except KeyboardInterrupt:
1770 self.ui.warn(_("interrupted"))
1750 self.ui.warn(_("interrupted"))
1771 raise
1751 raise
1772 except Exception, inst:
1752 except Exception, inst:
1773 err(_("unpacking changeset %s: %s") % (short(n), inst))
1753 err(_("unpacking changeset %s: %s") % (short(n), inst))
1774 continue
1754 continue
1775
1755
1776 neededmanifests[changes[0]] = n
1756 neededmanifests[changes[0]] = n
1777
1757
1778 for f in changes[3]:
1758 for f in changes[3]:
1779 filelinkrevs.setdefault(f, []).append(i)
1759 filelinkrevs.setdefault(f, []).append(i)
1780
1760
1781 seen = {}
1761 seen = {}
1782 self.ui.status(_("checking manifests\n"))
1762 self.ui.status(_("checking manifests\n"))
1783 checksize(self.manifest, "manifest")
1763 checksize(self.manifest, "manifest")
1784
1764
1785 for i in range(self.manifest.count()):
1765 for i in range(self.manifest.count()):
1786 n = self.manifest.node(i)
1766 n = self.manifest.node(i)
1787 l = self.manifest.linkrev(n)
1767 l = self.manifest.linkrev(n)
1788
1768
1789 if l < 0 or l >= self.changelog.count():
1769 if l < 0 or l >= self.changelog.count():
1790 err(_("bad manifest link (%d) at revision %d") % (l, i))
1770 err(_("bad manifest link (%d) at revision %d") % (l, i))
1791
1771
1792 if n in neededmanifests:
1772 if n in neededmanifests:
1793 del neededmanifests[n]
1773 del neededmanifests[n]
1794
1774
1795 if n in seen:
1775 if n in seen:
1796 err(_("duplicate manifest at revision %d") % i)
1776 err(_("duplicate manifest at revision %d") % i)
1797
1777
1798 seen[n] = 1
1778 seen[n] = 1
1799
1779
1800 for p in self.manifest.parents(n):
1780 for p in self.manifest.parents(n):
1801 if p not in self.manifest.nodemap:
1781 if p not in self.manifest.nodemap:
1802 err(_("manifest %s has unknown parent %s") %
1782 err(_("manifest %s has unknown parent %s") %
1803 (short(n), short(p)))
1783 (short(n), short(p)))
1804
1784
1805 try:
1785 try:
1806 delta = mdiff.patchtext(self.manifest.delta(n))
1786 delta = mdiff.patchtext(self.manifest.delta(n))
1807 except KeyboardInterrupt:
1787 except KeyboardInterrupt:
1808 self.ui.warn(_("interrupted"))
1788 self.ui.warn(_("interrupted"))
1809 raise
1789 raise
1810 except Exception, inst:
1790 except Exception, inst:
1811 err(_("unpacking manifest %s: %s") % (short(n), inst))
1791 err(_("unpacking manifest %s: %s") % (short(n), inst))
1812 continue
1792 continue
1813
1793
1814 try:
1794 try:
1815 ff = [ l.split('\0') for l in delta.splitlines() ]
1795 ff = [ l.split('\0') for l in delta.splitlines() ]
1816 for f, fn in ff:
1796 for f, fn in ff:
1817 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1797 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1818 except (ValueError, TypeError), inst:
1798 except (ValueError, TypeError), inst:
1819 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1799 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1820
1800
1821 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1801 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1822
1802
1823 for m, c in neededmanifests.items():
1803 for m, c in neededmanifests.items():
1824 err(_("Changeset %s refers to unknown manifest %s") %
1804 err(_("Changeset %s refers to unknown manifest %s") %
1825 (short(m), short(c)))
1805 (short(m), short(c)))
1826 del neededmanifests
1806 del neededmanifests
1827
1807
1828 for f in filenodes:
1808 for f in filenodes:
1829 if f not in filelinkrevs:
1809 if f not in filelinkrevs:
1830 err(_("file %s in manifest but not in changesets") % f)
1810 err(_("file %s in manifest but not in changesets") % f)
1831
1811
1832 for f in filelinkrevs:
1812 for f in filelinkrevs:
1833 if f not in filenodes:
1813 if f not in filenodes:
1834 err(_("file %s in changeset but not in manifest") % f)
1814 err(_("file %s in changeset but not in manifest") % f)
1835
1815
1836 self.ui.status(_("checking files\n"))
1816 self.ui.status(_("checking files\n"))
1837 ff = filenodes.keys()
1817 ff = filenodes.keys()
1838 ff.sort()
1818 ff.sort()
1839 for f in ff:
1819 for f in ff:
1840 if f == "/dev/null":
1820 if f == "/dev/null":
1841 continue
1821 continue
1842 files += 1
1822 files += 1
1843 if not f:
1823 if not f:
1844 err(_("file without name in manifest %s") % short(n))
1824 err(_("file without name in manifest %s") % short(n))
1845 continue
1825 continue
1846 fl = self.file(f)
1826 fl = self.file(f)
1847 checksize(fl, f)
1827 checksize(fl, f)
1848
1828
1849 nodes = {nullid: 1}
1829 nodes = {nullid: 1}
1850 seen = {}
1830 seen = {}
1851 for i in range(fl.count()):
1831 for i in range(fl.count()):
1852 revisions += 1
1832 revisions += 1
1853 n = fl.node(i)
1833 n = fl.node(i)
1854
1834
1855 if n in seen:
1835 if n in seen:
1856 err(_("%s: duplicate revision %d") % (f, i))
1836 err(_("%s: duplicate revision %d") % (f, i))
1857 if n not in filenodes[f]:
1837 if n not in filenodes[f]:
1858 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1838 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1859 else:
1839 else:
1860 del filenodes[f][n]
1840 del filenodes[f][n]
1861
1841
1862 flr = fl.linkrev(n)
1842 flr = fl.linkrev(n)
1863 if flr not in filelinkrevs.get(f, []):
1843 if flr not in filelinkrevs.get(f, []):
1864 err(_("%s:%s points to unexpected changeset %d")
1844 err(_("%s:%s points to unexpected changeset %d")
1865 % (f, short(n), flr))
1845 % (f, short(n), flr))
1866 else:
1846 else:
1867 filelinkrevs[f].remove(flr)
1847 filelinkrevs[f].remove(flr)
1868
1848
1869 # verify contents
1849 # verify contents
1870 try:
1850 try:
1871 t = fl.read(n)
1851 t = fl.read(n)
1872 except KeyboardInterrupt:
1852 except KeyboardInterrupt:
1873 self.ui.warn(_("interrupted"))
1853 self.ui.warn(_("interrupted"))
1874 raise
1854 raise
1875 except Exception, inst:
1855 except Exception, inst:
1876 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1856 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1877
1857
1878 # verify parents
1858 # verify parents
1879 (p1, p2) = fl.parents(n)
1859 (p1, p2) = fl.parents(n)
1880 if p1 not in nodes:
1860 if p1 not in nodes:
1881 err(_("file %s:%s unknown parent 1 %s") %
1861 err(_("file %s:%s unknown parent 1 %s") %
1882 (f, short(n), short(p1)))
1862 (f, short(n), short(p1)))
1883 if p2 not in nodes:
1863 if p2 not in nodes:
1884 err(_("file %s:%s unknown parent 2 %s") %
1864 err(_("file %s:%s unknown parent 2 %s") %
1885 (f, short(n), short(p1)))
1865 (f, short(n), short(p1)))
1886 nodes[n] = 1
1866 nodes[n] = 1
1887
1867
1888 # cross-check
1868 # cross-check
1889 for node in filenodes[f]:
1869 for node in filenodes[f]:
1890 err(_("node %s in manifests not in %s") % (hex(node), f))
1870 err(_("node %s in manifests not in %s") % (hex(node), f))
1891
1871
1892 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1872 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1893 (files, changesets, revisions))
1873 (files, changesets, revisions))
1894
1874
1895 if errors[0]:
1875 if errors[0]:
1896 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1876 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1897 return 1
1877 return 1
1898
1878
1899 # used to avoid circular references so destructors work
1879 # used to avoid circular references so destructors work
1900 def aftertrans(base):
1880 def aftertrans(base):
1901 p = base
1881 p = base
1902 def a():
1882 def a():
1903 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1883 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1904 util.rename(os.path.join(p, "journal.dirstate"),
1884 util.rename(os.path.join(p, "journal.dirstate"),
1905 os.path.join(p, "undo.dirstate"))
1885 os.path.join(p, "undo.dirstate"))
1906 return a
1886 return a
1907
1887
@@ -1,733 +1,756 b''
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8
8
9 This contains helper routines that are independent of the SCM core and hide
9 This contains helper routines that are independent of the SCM core and hide
10 platform-specific details from the core.
10 platform-specific details from the core.
11 """
11 """
12
12
13 import os, errno
13 import os, errno
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import *
15 from demandload import *
16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
17 demandload(globals(), "threading time")
17 demandload(globals(), "threading time")
18
18
19 def pipefilter(s, cmd):
19 def pipefilter(s, cmd):
20 '''filter string S through command CMD, returning its output'''
20 '''filter string S through command CMD, returning its output'''
21 (pout, pin) = popen2.popen2(cmd, -1, 'b')
21 (pout, pin) = popen2.popen2(cmd, -1, 'b')
22 def writer():
22 def writer():
23 pin.write(s)
23 pin.write(s)
24 pin.close()
24 pin.close()
25
25
26 # we should use select instead on UNIX, but this will work on most
26 # we should use select instead on UNIX, but this will work on most
27 # systems, including Windows
27 # systems, including Windows
28 w = threading.Thread(target=writer)
28 w = threading.Thread(target=writer)
29 w.start()
29 w.start()
30 f = pout.read()
30 f = pout.read()
31 pout.close()
31 pout.close()
32 w.join()
32 w.join()
33 return f
33 return f
34
34
35 def tempfilter(s, cmd):
35 def tempfilter(s, cmd):
36 '''filter string S through a pair of temporary files with CMD.
36 '''filter string S through a pair of temporary files with CMD.
37 CMD is used as a template to create the real command to be run,
37 CMD is used as a template to create the real command to be run,
38 with the strings INFILE and OUTFILE replaced by the real names of
38 with the strings INFILE and OUTFILE replaced by the real names of
39 the temporary files generated.'''
39 the temporary files generated.'''
40 inname, outname = None, None
40 inname, outname = None, None
41 try:
41 try:
42 infd, inname = tempfile.mkstemp(prefix='hgfin')
42 infd, inname = tempfile.mkstemp(prefix='hgfin')
43 fp = os.fdopen(infd, 'wb')
43 fp = os.fdopen(infd, 'wb')
44 fp.write(s)
44 fp.write(s)
45 fp.close()
45 fp.close()
46 outfd, outname = tempfile.mkstemp(prefix='hgfout')
46 outfd, outname = tempfile.mkstemp(prefix='hgfout')
47 os.close(outfd)
47 os.close(outfd)
48 cmd = cmd.replace('INFILE', inname)
48 cmd = cmd.replace('INFILE', inname)
49 cmd = cmd.replace('OUTFILE', outname)
49 cmd = cmd.replace('OUTFILE', outname)
50 code = os.system(cmd)
50 code = os.system(cmd)
51 if code: raise Abort(_("command '%s' failed: %s") %
51 if code: raise Abort(_("command '%s' failed: %s") %
52 (cmd, explain_exit(code)))
52 (cmd, explain_exit(code)))
53 return open(outname, 'rb').read()
53 return open(outname, 'rb').read()
54 finally:
54 finally:
55 try:
55 try:
56 if inname: os.unlink(inname)
56 if inname: os.unlink(inname)
57 except: pass
57 except: pass
58 try:
58 try:
59 if outname: os.unlink(outname)
59 if outname: os.unlink(outname)
60 except: pass
60 except: pass
61
61
62 filtertable = {
62 filtertable = {
63 'tempfile:': tempfilter,
63 'tempfile:': tempfilter,
64 'pipe:': pipefilter,
64 'pipe:': pipefilter,
65 }
65 }
66
66
67 def filter(s, cmd):
67 def filter(s, cmd):
68 "filter a string through a command that transforms its input to its output"
68 "filter a string through a command that transforms its input to its output"
69 for name, fn in filtertable.iteritems():
69 for name, fn in filtertable.iteritems():
70 if cmd.startswith(name):
70 if cmd.startswith(name):
71 return fn(s, cmd[len(name):].lstrip())
71 return fn(s, cmd[len(name):].lstrip())
72 return pipefilter(s, cmd)
72 return pipefilter(s, cmd)
73
73
74 def patch(strip, patchname, ui):
74 def patch(strip, patchname, ui):
75 """apply the patch <patchname> to the working directory.
75 """apply the patch <patchname> to the working directory.
76 a list of patched files is returned"""
76 a list of patched files is returned"""
77 fp = os.popen('patch -p%d < "%s"' % (strip, patchname))
77 fp = os.popen('patch -p%d < "%s"' % (strip, patchname))
78 files = {}
78 files = {}
79 for line in fp:
79 for line in fp:
80 line = line.rstrip()
80 line = line.rstrip()
81 ui.status("%s\n" % line)
81 ui.status("%s\n" % line)
82 if line.startswith('patching file '):
82 if line.startswith('patching file '):
83 pf = parse_patch_output(line)
83 pf = parse_patch_output(line)
84 files.setdefault(pf, 1)
84 files.setdefault(pf, 1)
85 code = fp.close()
85 code = fp.close()
86 if code:
86 if code:
87 raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
87 raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
88 return files.keys()
88 return files.keys()
89
89
90 def binary(s):
90 def binary(s):
91 """return true if a string is binary data using diff's heuristic"""
91 """return true if a string is binary data using diff's heuristic"""
92 if s and '\0' in s[:4096]:
92 if s and '\0' in s[:4096]:
93 return True
93 return True
94 return False
94 return False
95
95
96 def unique(g):
96 def unique(g):
97 """return the uniq elements of iterable g"""
97 """return the uniq elements of iterable g"""
98 seen = {}
98 seen = {}
99 for f in g:
99 for f in g:
100 if f not in seen:
100 if f not in seen:
101 seen[f] = 1
101 seen[f] = 1
102 yield f
102 yield f
103
103
104 class Abort(Exception):
104 class Abort(Exception):
105 """Raised if a command needs to print an error and exit."""
105 """Raised if a command needs to print an error and exit."""
106
106
107 def always(fn): return True
107 def always(fn): return True
108 def never(fn): return False
108 def never(fn): return False
109
109
110 def patkind(name, dflt_pat='glob'):
110 def patkind(name, dflt_pat='glob'):
111 """Split a string into an optional pattern kind prefix and the
111 """Split a string into an optional pattern kind prefix and the
112 actual pattern."""
112 actual pattern."""
113 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
113 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
114 if name.startswith(prefix + ':'): return name.split(':', 1)
114 if name.startswith(prefix + ':'): return name.split(':', 1)
115 return dflt_pat, name
115 return dflt_pat, name
116
116
117 def globre(pat, head='^', tail='$'):
117 def globre(pat, head='^', tail='$'):
118 "convert a glob pattern into a regexp"
118 "convert a glob pattern into a regexp"
119 i, n = 0, len(pat)
119 i, n = 0, len(pat)
120 res = ''
120 res = ''
121 group = False
121 group = False
122 def peek(): return i < n and pat[i]
122 def peek(): return i < n and pat[i]
123 while i < n:
123 while i < n:
124 c = pat[i]
124 c = pat[i]
125 i = i+1
125 i = i+1
126 if c == '*':
126 if c == '*':
127 if peek() == '*':
127 if peek() == '*':
128 i += 1
128 i += 1
129 res += '.*'
129 res += '.*'
130 else:
130 else:
131 res += '[^/]*'
131 res += '[^/]*'
132 elif c == '?':
132 elif c == '?':
133 res += '.'
133 res += '.'
134 elif c == '[':
134 elif c == '[':
135 j = i
135 j = i
136 if j < n and pat[j] in '!]':
136 if j < n and pat[j] in '!]':
137 j += 1
137 j += 1
138 while j < n and pat[j] != ']':
138 while j < n and pat[j] != ']':
139 j += 1
139 j += 1
140 if j >= n:
140 if j >= n:
141 res += '\\['
141 res += '\\['
142 else:
142 else:
143 stuff = pat[i:j].replace('\\','\\\\')
143 stuff = pat[i:j].replace('\\','\\\\')
144 i = j + 1
144 i = j + 1
145 if stuff[0] == '!':
145 if stuff[0] == '!':
146 stuff = '^' + stuff[1:]
146 stuff = '^' + stuff[1:]
147 elif stuff[0] == '^':
147 elif stuff[0] == '^':
148 stuff = '\\' + stuff
148 stuff = '\\' + stuff
149 res = '%s[%s]' % (res, stuff)
149 res = '%s[%s]' % (res, stuff)
150 elif c == '{':
150 elif c == '{':
151 group = True
151 group = True
152 res += '(?:'
152 res += '(?:'
153 elif c == '}' and group:
153 elif c == '}' and group:
154 res += ')'
154 res += ')'
155 group = False
155 group = False
156 elif c == ',' and group:
156 elif c == ',' and group:
157 res += '|'
157 res += '|'
158 else:
158 else:
159 res += re.escape(c)
159 res += re.escape(c)
160 return head + res + tail
160 return head + res + tail
161
161
162 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
162 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
163
163
164 def pathto(n1, n2):
164 def pathto(n1, n2):
165 '''return the relative path from one place to another.
165 '''return the relative path from one place to another.
166 this returns a path in the form used by the local filesystem, not hg.'''
166 this returns a path in the form used by the local filesystem, not hg.'''
167 if not n1: return localpath(n2)
167 if not n1: return localpath(n2)
168 a, b = n1.split('/'), n2.split('/')
168 a, b = n1.split('/'), n2.split('/')
169 a.reverse()
169 a.reverse()
170 b.reverse()
170 b.reverse()
171 while a and b and a[-1] == b[-1]:
171 while a and b and a[-1] == b[-1]:
172 a.pop()
172 a.pop()
173 b.pop()
173 b.pop()
174 b.reverse()
174 b.reverse()
175 return os.sep.join((['..'] * len(a)) + b)
175 return os.sep.join((['..'] * len(a)) + b)
176
176
177 def canonpath(root, cwd, myname):
177 def canonpath(root, cwd, myname):
178 """return the canonical path of myname, given cwd and root"""
178 """return the canonical path of myname, given cwd and root"""
179 if root == os.sep:
179 if root == os.sep:
180 rootsep = os.sep
180 rootsep = os.sep
181 else:
181 else:
182 rootsep = root + os.sep
182 rootsep = root + os.sep
183 name = myname
183 name = myname
184 if not name.startswith(os.sep):
184 if not name.startswith(os.sep):
185 name = os.path.join(root, cwd, name)
185 name = os.path.join(root, cwd, name)
186 name = os.path.normpath(name)
186 name = os.path.normpath(name)
187 if name.startswith(rootsep):
187 if name.startswith(rootsep):
188 return pconvert(name[len(rootsep):])
188 return pconvert(name[len(rootsep):])
189 elif name == root:
189 elif name == root:
190 return ''
190 return ''
191 else:
191 else:
192 raise Abort('%s not under root' % myname)
192 raise Abort('%s not under root' % myname)
193
193
194 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
194 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
195 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
195 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
196
196
197 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
197 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
198 if os.name == 'nt':
198 if os.name == 'nt':
199 dflt_pat = 'glob'
199 dflt_pat = 'glob'
200 else:
200 else:
201 dflt_pat = 'relpath'
201 dflt_pat = 'relpath'
202 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
202 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
203
203
204 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
204 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
205 """build a function to match a set of file patterns
205 """build a function to match a set of file patterns
206
206
207 arguments:
207 arguments:
208 canonroot - the canonical root of the tree you're matching against
208 canonroot - the canonical root of the tree you're matching against
209 cwd - the current working directory, if relevant
209 cwd - the current working directory, if relevant
210 names - patterns to find
210 names - patterns to find
211 inc - patterns to include
211 inc - patterns to include
212 exc - patterns to exclude
212 exc - patterns to exclude
213 head - a regex to prepend to patterns to control whether a match is rooted
213 head - a regex to prepend to patterns to control whether a match is rooted
214
214
215 a pattern is one of:
215 a pattern is one of:
216 'glob:<rooted glob>'
216 'glob:<rooted glob>'
217 're:<rooted regexp>'
217 're:<rooted regexp>'
218 'path:<rooted path>'
218 'path:<rooted path>'
219 'relglob:<relative glob>'
219 'relglob:<relative glob>'
220 'relpath:<relative path>'
220 'relpath:<relative path>'
221 'relre:<relative regexp>'
221 'relre:<relative regexp>'
222 '<rooted path or regexp>'
222 '<rooted path or regexp>'
223
223
224 returns:
224 returns:
225 a 3-tuple containing
225 a 3-tuple containing
226 - list of explicit non-pattern names passed in
226 - list of explicit non-pattern names passed in
227 - a bool match(filename) function
227 - a bool match(filename) function
228 - a bool indicating if any patterns were passed in
228 - a bool indicating if any patterns were passed in
229
229
230 todo:
230 todo:
231 make head regex a rooted bool
231 make head regex a rooted bool
232 """
232 """
233
233
234 def contains_glob(name):
234 def contains_glob(name):
235 for c in name:
235 for c in name:
236 if c in _globchars: return True
236 if c in _globchars: return True
237 return False
237 return False
238
238
239 def regex(kind, name, tail):
239 def regex(kind, name, tail):
240 '''convert a pattern into a regular expression'''
240 '''convert a pattern into a regular expression'''
241 if kind == 're':
241 if kind == 're':
242 return name
242 return name
243 elif kind == 'path':
243 elif kind == 'path':
244 return '^' + re.escape(name) + '(?:/|$)'
244 return '^' + re.escape(name) + '(?:/|$)'
245 elif kind == 'relglob':
245 elif kind == 'relglob':
246 return head + globre(name, '(?:|.*/)', tail)
246 return head + globre(name, '(?:|.*/)', tail)
247 elif kind == 'relpath':
247 elif kind == 'relpath':
248 return head + re.escape(name) + tail
248 return head + re.escape(name) + tail
249 elif kind == 'relre':
249 elif kind == 'relre':
250 if name.startswith('^'):
250 if name.startswith('^'):
251 return name
251 return name
252 return '.*' + name
252 return '.*' + name
253 return head + globre(name, '', tail)
253 return head + globre(name, '', tail)
254
254
255 def matchfn(pats, tail):
255 def matchfn(pats, tail):
256 """build a matching function from a set of patterns"""
256 """build a matching function from a set of patterns"""
257 if not pats:
257 if not pats:
258 return
258 return
259 matches = []
259 matches = []
260 for k, p in pats:
260 for k, p in pats:
261 try:
261 try:
262 pat = '(?:%s)' % regex(k, p, tail)
262 pat = '(?:%s)' % regex(k, p, tail)
263 matches.append(re.compile(pat).match)
263 matches.append(re.compile(pat).match)
264 except re.error:
264 except re.error:
265 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
265 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
266 else: raise Abort("invalid pattern (%s): %s" % (k, p))
266 else: raise Abort("invalid pattern (%s): %s" % (k, p))
267
267
268 def buildfn(text):
268 def buildfn(text):
269 for m in matches:
269 for m in matches:
270 r = m(text)
270 r = m(text)
271 if r:
271 if r:
272 return r
272 return r
273
273
274 return buildfn
274 return buildfn
275
275
276 def globprefix(pat):
276 def globprefix(pat):
277 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
277 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
278 root = []
278 root = []
279 for p in pat.split(os.sep):
279 for p in pat.split(os.sep):
280 if contains_glob(p): break
280 if contains_glob(p): break
281 root.append(p)
281 root.append(p)
282 return '/'.join(root)
282 return '/'.join(root)
283
283
284 pats = []
284 pats = []
285 files = []
285 files = []
286 roots = []
286 roots = []
287 for kind, name in [patkind(p, dflt_pat) for p in names]:
287 for kind, name in [patkind(p, dflt_pat) for p in names]:
288 if kind in ('glob', 'relpath'):
288 if kind in ('glob', 'relpath'):
289 name = canonpath(canonroot, cwd, name)
289 name = canonpath(canonroot, cwd, name)
290 if name == '':
290 if name == '':
291 kind, name = 'glob', '**'
291 kind, name = 'glob', '**'
292 if kind in ('glob', 'path', 're'):
292 if kind in ('glob', 'path', 're'):
293 pats.append((kind, name))
293 pats.append((kind, name))
294 if kind == 'glob':
294 if kind == 'glob':
295 root = globprefix(name)
295 root = globprefix(name)
296 if root: roots.append(root)
296 if root: roots.append(root)
297 elif kind == 'relpath':
297 elif kind == 'relpath':
298 files.append((kind, name))
298 files.append((kind, name))
299 roots.append(name)
299 roots.append(name)
300
300
301 patmatch = matchfn(pats, '$') or always
301 patmatch = matchfn(pats, '$') or always
302 filematch = matchfn(files, '(?:/|$)') or always
302 filematch = matchfn(files, '(?:/|$)') or always
303 incmatch = always
303 incmatch = always
304 if inc:
304 if inc:
305 incmatch = matchfn(map(patkind, inc), '(?:/|$)')
305 incmatch = matchfn(map(patkind, inc), '(?:/|$)')
306 excmatch = lambda fn: False
306 excmatch = lambda fn: False
307 if exc:
307 if exc:
308 excmatch = matchfn(map(patkind, exc), '(?:/|$)')
308 excmatch = matchfn(map(patkind, exc), '(?:/|$)')
309
309
310 return (roots,
310 return (roots,
311 lambda fn: (incmatch(fn) and not excmatch(fn) and
311 lambda fn: (incmatch(fn) and not excmatch(fn) and
312 (fn.endswith('/') or
312 (fn.endswith('/') or
313 (not pats and not files) or
313 (not pats and not files) or
314 (pats and patmatch(fn)) or
314 (pats and patmatch(fn)) or
315 (files and filematch(fn)))),
315 (files and filematch(fn)))),
316 (inc or exc or (pats and pats != [('glob', '**')])) and True)
316 (inc or exc or (pats and pats != [('glob', '**')])) and True)
317
317
318 def system(cmd, errprefix=None):
318 def system(cmd, errprefix=None):
319 """execute a shell command that must succeed"""
319 """execute a shell command that must succeed"""
320 rc = os.system(cmd)
320 rc = os.system(cmd)
321 if rc:
321 if rc:
322 errmsg = "%s %s" % (os.path.basename(cmd.split(None, 1)[0]),
322 errmsg = "%s %s" % (os.path.basename(cmd.split(None, 1)[0]),
323 explain_exit(rc)[0])
323 explain_exit(rc)[0])
324 if errprefix:
324 if errprefix:
325 errmsg = "%s: %s" % (errprefix, errmsg)
325 errmsg = "%s: %s" % (errprefix, errmsg)
326 raise Abort(errmsg)
326 raise Abort(errmsg)
327
327
328 def esystem(cmd, environ={}, cwd=None):
329 '''enhanced shell command execution.
330 run with environment maybe modified, maybe in different dir.'''
331 oldenv = {}
332 for k in environ:
333 oldenv[k] = os.environ.get(k)
334 if cwd is not None:
335 oldcwd = os.getcwd()
336 try:
337 for k, v in environ.iteritems():
338 os.environ[k] = str(v)
339 if cwd is not None and oldcwd != cwd:
340 os.chdir(cwd)
341 return os.system(cmd)
342 finally:
343 for k, v in oldenv.iteritems():
344 if v is None:
345 del os.environ[k]
346 else:
347 os.environ[k] = v
348 if cwd is not None and oldcwd != cwd:
349 os.chdir(oldcwd)
350
328 def rename(src, dst):
351 def rename(src, dst):
329 """forcibly rename a file"""
352 """forcibly rename a file"""
330 try:
353 try:
331 os.rename(src, dst)
354 os.rename(src, dst)
332 except:
355 except:
333 os.unlink(dst)
356 os.unlink(dst)
334 os.rename(src, dst)
357 os.rename(src, dst)
335
358
336 def unlink(f):
359 def unlink(f):
337 """unlink and remove the directory if it is empty"""
360 """unlink and remove the directory if it is empty"""
338 os.unlink(f)
361 os.unlink(f)
339 # try removing directories that might now be empty
362 # try removing directories that might now be empty
340 try: os.removedirs(os.path.dirname(f))
363 try: os.removedirs(os.path.dirname(f))
341 except: pass
364 except: pass
342
365
343 def copyfiles(src, dst, hardlink=None):
366 def copyfiles(src, dst, hardlink=None):
344 """Copy a directory tree using hardlinks if possible"""
367 """Copy a directory tree using hardlinks if possible"""
345
368
346 if hardlink is None:
369 if hardlink is None:
347 hardlink = (os.stat(src).st_dev ==
370 hardlink = (os.stat(src).st_dev ==
348 os.stat(os.path.dirname(dst)).st_dev)
371 os.stat(os.path.dirname(dst)).st_dev)
349
372
350 if os.path.isdir(src):
373 if os.path.isdir(src):
351 os.mkdir(dst)
374 os.mkdir(dst)
352 for name in os.listdir(src):
375 for name in os.listdir(src):
353 srcname = os.path.join(src, name)
376 srcname = os.path.join(src, name)
354 dstname = os.path.join(dst, name)
377 dstname = os.path.join(dst, name)
355 copyfiles(srcname, dstname, hardlink)
378 copyfiles(srcname, dstname, hardlink)
356 else:
379 else:
357 if hardlink:
380 if hardlink:
358 try:
381 try:
359 os_link(src, dst)
382 os_link(src, dst)
360 except:
383 except:
361 hardlink = False
384 hardlink = False
362 shutil.copy(src, dst)
385 shutil.copy(src, dst)
363 else:
386 else:
364 shutil.copy(src, dst)
387 shutil.copy(src, dst)
365
388
366 def audit_path(path):
389 def audit_path(path):
367 """Abort if path contains dangerous components"""
390 """Abort if path contains dangerous components"""
368 parts = os.path.normcase(path).split(os.sep)
391 parts = os.path.normcase(path).split(os.sep)
369 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
392 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
370 or os.pardir in parts):
393 or os.pardir in parts):
371 raise Abort(_("path contains illegal component: %s\n") % path)
394 raise Abort(_("path contains illegal component: %s\n") % path)
372
395
373 def opener(base, audit=True):
396 def opener(base, audit=True):
374 """
397 """
375 return a function that opens files relative to base
398 return a function that opens files relative to base
376
399
377 this function is used to hide the details of COW semantics and
400 this function is used to hide the details of COW semantics and
378 remote file access from higher level code.
401 remote file access from higher level code.
379 """
402 """
380 p = base
403 p = base
381 audit_p = audit
404 audit_p = audit
382
405
383 def mktempcopy(name):
406 def mktempcopy(name):
384 d, fn = os.path.split(name)
407 d, fn = os.path.split(name)
385 fd, temp = tempfile.mkstemp(prefix=fn, dir=d)
408 fd, temp = tempfile.mkstemp(prefix=fn, dir=d)
386 fp = os.fdopen(fd, "wb")
409 fp = os.fdopen(fd, "wb")
387 try:
410 try:
388 fp.write(file(name, "rb").read())
411 fp.write(file(name, "rb").read())
389 except:
412 except:
390 try: os.unlink(temp)
413 try: os.unlink(temp)
391 except: pass
414 except: pass
392 raise
415 raise
393 fp.close()
416 fp.close()
394 st = os.lstat(name)
417 st = os.lstat(name)
395 os.chmod(temp, st.st_mode)
418 os.chmod(temp, st.st_mode)
396 return temp
419 return temp
397
420
398 class atomicfile(file):
421 class atomicfile(file):
399 """the file will only be copied on close"""
422 """the file will only be copied on close"""
400 def __init__(self, name, mode, atomic=False):
423 def __init__(self, name, mode, atomic=False):
401 self.__name = name
424 self.__name = name
402 self.temp = mktempcopy(name)
425 self.temp = mktempcopy(name)
403 file.__init__(self, self.temp, mode)
426 file.__init__(self, self.temp, mode)
404 def close(self):
427 def close(self):
405 if not self.closed:
428 if not self.closed:
406 file.close(self)
429 file.close(self)
407 rename(self.temp, self.__name)
430 rename(self.temp, self.__name)
408 def __del__(self):
431 def __del__(self):
409 self.close()
432 self.close()
410
433
411 def o(path, mode="r", text=False, atomic=False):
434 def o(path, mode="r", text=False, atomic=False):
412 if audit_p:
435 if audit_p:
413 audit_path(path)
436 audit_path(path)
414 f = os.path.join(p, path)
437 f = os.path.join(p, path)
415
438
416 if not text:
439 if not text:
417 mode += "b" # for that other OS
440 mode += "b" # for that other OS
418
441
419 if mode[0] != "r":
442 if mode[0] != "r":
420 try:
443 try:
421 nlink = nlinks(f)
444 nlink = nlinks(f)
422 except OSError:
445 except OSError:
423 d = os.path.dirname(f)
446 d = os.path.dirname(f)
424 if not os.path.isdir(d):
447 if not os.path.isdir(d):
425 os.makedirs(d)
448 os.makedirs(d)
426 else:
449 else:
427 if atomic:
450 if atomic:
428 return atomicfile(f, mode)
451 return atomicfile(f, mode)
429 if nlink > 1:
452 if nlink > 1:
430 rename(mktempcopy(f), f)
453 rename(mktempcopy(f), f)
431 return file(f, mode)
454 return file(f, mode)
432
455
433 return o
456 return o
434
457
435 def _makelock_file(info, pathname):
458 def _makelock_file(info, pathname):
436 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
459 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
437 os.write(ld, info)
460 os.write(ld, info)
438 os.close(ld)
461 os.close(ld)
439
462
440 def _readlock_file(pathname):
463 def _readlock_file(pathname):
441 return file(pathname).read()
464 return file(pathname).read()
442
465
443 def nlinks(pathname):
466 def nlinks(pathname):
444 """Return number of hardlinks for the given file."""
467 """Return number of hardlinks for the given file."""
445 return os.stat(pathname).st_nlink
468 return os.stat(pathname).st_nlink
446
469
447 if hasattr(os, 'link'):
470 if hasattr(os, 'link'):
448 os_link = os.link
471 os_link = os.link
449 else:
472 else:
450 def os_link(src, dst):
473 def os_link(src, dst):
451 raise OSError(0, _("Hardlinks not supported"))
474 raise OSError(0, _("Hardlinks not supported"))
452
475
453 # Platform specific variants
476 # Platform specific variants
454 if os.name == 'nt':
477 if os.name == 'nt':
455 demandload(globals(), "msvcrt")
478 demandload(globals(), "msvcrt")
456 nulldev = 'NUL:'
479 nulldev = 'NUL:'
457
480
458 class winstdout:
481 class winstdout:
459 '''stdout on windows misbehaves if sent through a pipe'''
482 '''stdout on windows misbehaves if sent through a pipe'''
460
483
461 def __init__(self, fp):
484 def __init__(self, fp):
462 self.fp = fp
485 self.fp = fp
463
486
464 def __getattr__(self, key):
487 def __getattr__(self, key):
465 return getattr(self.fp, key)
488 return getattr(self.fp, key)
466
489
467 def close(self):
490 def close(self):
468 try:
491 try:
469 self.fp.close()
492 self.fp.close()
470 except: pass
493 except: pass
471
494
472 def write(self, s):
495 def write(self, s):
473 try:
496 try:
474 return self.fp.write(s)
497 return self.fp.write(s)
475 except IOError, inst:
498 except IOError, inst:
476 if inst.errno != 0: raise
499 if inst.errno != 0: raise
477 self.close()
500 self.close()
478 raise IOError(errno.EPIPE, 'Broken pipe')
501 raise IOError(errno.EPIPE, 'Broken pipe')
479
502
480 sys.stdout = winstdout(sys.stdout)
503 sys.stdout = winstdout(sys.stdout)
481
504
482 try:
505 try:
483 import win32api, win32process
506 import win32api, win32process
484 filename = win32process.GetModuleFileNameEx(win32api.GetCurrentProcess(), 0)
507 filename = win32process.GetModuleFileNameEx(win32api.GetCurrentProcess(), 0)
485 systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
508 systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
486
509
487 except ImportError:
510 except ImportError:
488 systemrc = r'c:\mercurial\mercurial.ini'
511 systemrc = r'c:\mercurial\mercurial.ini'
489 pass
512 pass
490
513
491 rcpath = (systemrc,
514 rcpath = (systemrc,
492 os.path.join(os.path.expanduser('~'), 'mercurial.ini'))
515 os.path.join(os.path.expanduser('~'), 'mercurial.ini'))
493
516
494 def parse_patch_output(output_line):
517 def parse_patch_output(output_line):
495 """parses the output produced by patch and returns the file name"""
518 """parses the output produced by patch and returns the file name"""
496 pf = output_line[14:]
519 pf = output_line[14:]
497 if pf[0] == '`':
520 if pf[0] == '`':
498 pf = pf[1:-1] # Remove the quotes
521 pf = pf[1:-1] # Remove the quotes
499 return pf
522 return pf
500
523
501 try: # ActivePython can create hard links using win32file module
524 try: # ActivePython can create hard links using win32file module
502 import win32api, win32con, win32file
525 import win32api, win32con, win32file
503
526
504 def os_link(src, dst): # NB will only succeed on NTFS
527 def os_link(src, dst): # NB will only succeed on NTFS
505 win32file.CreateHardLink(dst, src)
528 win32file.CreateHardLink(dst, src)
506
529
507 def nlinks(pathname):
530 def nlinks(pathname):
508 """Return number of hardlinks for the given file."""
531 """Return number of hardlinks for the given file."""
509 try:
532 try:
510 fh = win32file.CreateFile(pathname,
533 fh = win32file.CreateFile(pathname,
511 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
534 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
512 None, win32file.OPEN_EXISTING, 0, None)
535 None, win32file.OPEN_EXISTING, 0, None)
513 res = win32file.GetFileInformationByHandle(fh)
536 res = win32file.GetFileInformationByHandle(fh)
514 fh.Close()
537 fh.Close()
515 return res[7]
538 return res[7]
516 except:
539 except:
517 return os.stat(pathname).st_nlink
540 return os.stat(pathname).st_nlink
518
541
519 def testpid(pid):
542 def testpid(pid):
520 '''return False if pid is dead, True if running or not known'''
543 '''return False if pid is dead, True if running or not known'''
521 try:
544 try:
522 win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
545 win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
523 False, pid)
546 False, pid)
524 except:
547 except:
525 return True
548 return True
526
549
527 except ImportError:
550 except ImportError:
528 def testpid(pid):
551 def testpid(pid):
529 '''return False if pid dead, True if running or not known'''
552 '''return False if pid dead, True if running or not known'''
530 return True
553 return True
531
554
532 def is_exec(f, last):
555 def is_exec(f, last):
533 return last
556 return last
534
557
535 def set_exec(f, mode):
558 def set_exec(f, mode):
536 pass
559 pass
537
560
538 def set_binary(fd):
561 def set_binary(fd):
539 msvcrt.setmode(fd.fileno(), os.O_BINARY)
562 msvcrt.setmode(fd.fileno(), os.O_BINARY)
540
563
541 def pconvert(path):
564 def pconvert(path):
542 return path.replace("\\", "/")
565 return path.replace("\\", "/")
543
566
544 def localpath(path):
567 def localpath(path):
545 return path.replace('/', '\\')
568 return path.replace('/', '\\')
546
569
547 def normpath(path):
570 def normpath(path):
548 return pconvert(os.path.normpath(path))
571 return pconvert(os.path.normpath(path))
549
572
550 makelock = _makelock_file
573 makelock = _makelock_file
551 readlock = _readlock_file
574 readlock = _readlock_file
552
575
553 def explain_exit(code):
576 def explain_exit(code):
554 return _("exited with status %d") % code, code
577 return _("exited with status %d") % code, code
555
578
556 else:
579 else:
557 nulldev = '/dev/null'
580 nulldev = '/dev/null'
558
581
559 def rcfiles(path):
582 def rcfiles(path):
560 rcs = [os.path.join(path, 'hgrc')]
583 rcs = [os.path.join(path, 'hgrc')]
561 rcdir = os.path.join(path, 'hgrc.d')
584 rcdir = os.path.join(path, 'hgrc.d')
562 try:
585 try:
563 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
586 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
564 if f.endswith(".rc")])
587 if f.endswith(".rc")])
565 except OSError, inst: pass
588 except OSError, inst: pass
566 return rcs
589 return rcs
567 rcpath = []
590 rcpath = []
568 if len(sys.argv) > 0:
591 if len(sys.argv) > 0:
569 rcpath.extend(rcfiles(os.path.dirname(sys.argv[0]) + '/../etc/mercurial'))
592 rcpath.extend(rcfiles(os.path.dirname(sys.argv[0]) + '/../etc/mercurial'))
570 rcpath.extend(rcfiles('/etc/mercurial'))
593 rcpath.extend(rcfiles('/etc/mercurial'))
571 rcpath.append(os.path.expanduser('~/.hgrc'))
594 rcpath.append(os.path.expanduser('~/.hgrc'))
572 rcpath = [os.path.normpath(f) for f in rcpath]
595 rcpath = [os.path.normpath(f) for f in rcpath]
573
596
def parse_patch_output(output_line):
    """parses the output produced by patch and returns the file name"""
    # Strip the fixed "patching file " prefix (14 characters).
    pf = output_line[14:]
    # patch(1) quotes names containing spaces; unwrap those.
    if pf.startswith("'") and pf.endswith("'") and " " in pf:
        pf = pf[1:-1]  # Remove the quotes
    return pf
580
603
def is_exec(f, last):
    """check whether a file is executable"""
    # 'last' is accepted for interface compatibility and not used here.
    return (os.stat(f).st_mode & 0o100) != 0
584
607
def set_exec(f, mode):
    """Set (mode truthy) or clear (mode falsy) execute permission on f.

    Does nothing when the owner-execute bit already matches mode.
    """
    st = os.stat(f).st_mode
    if (st & 0o100 != 0) == mode:
        return
    if mode:
        # Turn on +x for every +r bit when making a file executable
        # and obey umask.
        umask = os.umask(0)
        os.umask(umask)
        os.chmod(f, st | (st & 0o444) >> 2 & ~umask)
    else:
        os.chmod(f, st & 0o666)
597
620
def set_binary(fd):
    """No-op on POSIX: files have no separate binary mode."""
600
623
def pconvert(path):
    """Identity on POSIX: paths already use '/' separators."""
    return path
603
626
def localpath(path):
    """Identity on POSIX: internal and local path forms coincide."""
    return path
606
629
# POSIX paths need no separator conversion, so plain normpath suffices.
normpath = os.path.normpath
608
631
def makelock(info, pathname):
    """Create a lock file at pathname storing info.

    Prefers a symlink (atomic, and readable without opening a file);
    falls back to a plain file where symlinks are unsupported.
    """
    try:
        os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            # Lock already held -- propagate to the caller.
            raise
        _makelock_file(info, pathname)
617
640
def readlock(pathname):
    """Return the info stored in the lock at pathname."""
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno != errno.EINVAL:
            raise
        # Not a symlink: the lock was made with the plain-file fallback.
        return _readlock_file(pathname)
626
649
627 def testpid(pid):
650 def testpid(pid):
628 '''return False if pid dead, True if running or not sure'''
651 '''return False if pid dead, True if running or not sure'''
629 try:
652 try:
630 os.kill(pid, 0)
653 os.kill(pid, 0)
631 return True
654 return True
632 except OSError, inst:
655 except OSError, inst:
633 return inst.errno != errno.ESRCH
656 return inst.errno != errno.ESRCH
634
657
def explain_exit(code):
    """return a 2-tuple (desc, code) describing a process's status"""
    if os.WIFEXITED(code):
        status = os.WEXITSTATUS(code)
        return _("exited with status %d") % status, status
    if os.WIFSIGNALED(code):
        signum = os.WTERMSIG(code)
        return _("killed by signal %d") % signum, signum
    if os.WIFSTOPPED(code):
        signum = os.WSTOPSIG(code)
        return _("stopped by signal %d") % signum, signum
    raise ValueError(_("invalid exit code"))
647
670
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter, targetsize=2**16):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.in_iter = iter(in_iter)
        self.buf = ''
        self.targetsize = int(targetsize)
        if self.targetsize <= 0:
            raise ValueError(_("targetsize must be greater than 0, was %d") %
                             targetsize)
        # True once in_iter has been fully consumed.
        self.iterempty = False

    def fillbuf(self):
        """Ignore target size; read every chunk from iterator until empty."""
        if self.iterempty:
            return
        sink = cStringIO.StringIO()
        sink.write(self.buf)
        for piece in self.in_iter:
            sink.write(piece)
        self.buf = sink.getvalue()
        self.iterempty = True

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and not self.iterempty:
            # Round the request up to a whole number of target-sized
            # chunks before pulling from the iterator.
            want = self.targetsize * ((l // self.targetsize) + 1)
            sink = cStringIO.StringIO()
            sink.write(self.buf)
            have = len(self.buf)
            for piece in self.in_iter:
                sink.write(piece)
                have += len(piece)
                if have >= want:
                    break
            if have < want:
                self.iterempty = True
            self.buf = sink.getvalue()
        # buffer() hands back the unread tail without copying it.
        s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s
692
715
def filechunkiter(f, size=65536):
    """Create a generator that produces all the data in the file size
    (default 65536) bytes at a time.  Chunks may be less than size
    bytes if the chunk is the last chunk in the file, or the file is a
    socket or some other type of file that sometimes reads less data
    than is requested."""
    while True:
        chunk = f.read(size)
        if not chunk:
            break
        yield chunk
703
726
def makedate():
    """Return (timestamp, tzoffset) for the current local time, where
    tzoffset is this zone's offset from UTC in seconds."""
    lt = time.localtime()
    # lt[8] is the DST flag; pick the matching zone offset.
    tz = time.altzone if (lt[8] == 1 and time.daylight) else time.timezone
    return time.mktime(lt), tz
711
734
def datestr(date=None, format='%c'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC."""
    t, tz = date or makedate()
    stamp = time.strftime(format, time.gmtime(float(t) - tz))
    # Append the offset as +/-HHMM.
    return "%s %+03d%02d" % (stamp, -tz / 3600, (-tz % 3600) / 60)
721
744
def walkrepos(path):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # Only an error on the root itself is fatal; deeper failures
        # are silently skipped by os.walk.
        if err.filename == path:
            raise err

    for root, dirs, files in os.walk(path, onerror=errhandler):
        if '.hg' in dirs:
            yield root
            # Don't descend into a repository's working directory.
            dirs[:] = []
General Comments 0
You need to be logged in to leave comments. Login now