#ifndef _LINUX_FIREWIRE_H
#define _LINUX_FIREWIRE_H

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
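
/*
 * Helpers that copy a buffer of 32-bit quadlets while byte-swapping between
 * big-endian bus order and CPU order.  The swap is symmetric, so the
 * _to_be32 variant simply reuses the _from_be32 implementation.  @size is a
 * byte count and is expected to be a multiple of four.
 */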
static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
{
	u32 *dst = _dst;
	__be32 *src = _src;
	int i;

	for (i = 0; i < size / 4; i++)
		dst[i] = be32_to_cpu(src[i]);
}

static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
{
	fw_memcpy_from_be32(_dst, _src, size);
}

#define CSR_REGISTER_BASE	0xfffff0000000ULL

/* register offsets are relative to CSR_REGISTER_BASE */
#define CSR_STATE_CLEAR		0x0
#define CSR_STATE_SET		0x4
#define CSR_NODE_IDS		0x8
#define CSR_RESET_START		0xc
#define CSR_SPLIT_TIMEOUT_HI	0x18
#define CSR_SPLIT_TIMEOUT_LO	0x1c
#define CSR_CYCLE_TIME		0x200
#define CSR_BUS_TIME		0x204
#define CSR_BUSY_TIMEOUT	0x210
#define CSR_BUS_MANAGER_ID	0x21c
#define CSR_BANDWIDTH_AVAILABLE	0x220
#define CSR_CHANNELS_AVAILABLE	0x224
#define CSR_CHANNELS_AVAILABLE_HI	0x224
#define CSR_CHANNELS_AVAILABLE_LO	0x228
#define CSR_BROADCAST_CHANNEL	0x234
#define CSR_CONFIG_ROM		0x400
#define CSR_CONFIG_ROM_END	0x800
#define CSR_FCP_COMMAND		0xB00
#define CSR_FCP_RESPONSE	0xD00
#define CSR_FCP_END		0xF00
#define CSR_TOPOLOGY_MAP	0x1000
#define CSR_TOPOLOGY_MAP_END	0x1400
#define CSR_SPEED_MAP		0x2000
#define CSR_SPEED_MAP_END	0x3000
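
/* IEEE 1212 config ROM entry key types (upper two bits of the key byte) */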
#define CSR_OFFSET		0x40
#define CSR_LEAF		0x80
#define CSR_DIRECTORY		0xc0
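
/* IEEE 1212 config ROM entry key IDs */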
#define CSR_DESCRIPTOR		0x01
#define CSR_VENDOR		0x03
#define CSR_HARDWARE_VERSION	0x04
#define CSR_NODE_CAPABILITIES	0x0c
#define CSR_UNIT		0x11
#define CSR_SPECIFIER_ID	0x12
#define CSR_VERSION		0x13
#define CSR_DEPENDENT_INFO	0x14
#define CSR_MODEL		0x17
#define CSR_INSTANCE		0x18
#define CSR_DIRECTORY_ID	0x20

struct fw_csr_iterator {
	u32 *p;
	u32 *end;
};

void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
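
/*
 * A minimal sketch of directory iteration with the helpers above (the
 * surrounding driver code and the use of the value are illustrative only):
 *
 *	struct fw_csr_iterator ci;
 *	int key, value;
 *
 *	fw_csr_iterator_init(&ci, unit->directory);
 *	while (fw_csr_iterator_next(&ci, &key, &value))
 *		if (key == CSR_MODEL)
 *			model = value;
 */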

extern struct bus_type fw_bus_type;

struct fw_card_driver;
struct fw_node;

struct fw_card {
	const struct fw_card_driver *driver;
	struct device *device;
	struct kref kref;
	struct completion done;

	int node_id;
	int generation;
	int current_tlabel, tlabel_mask;
	struct list_head transaction_list;
	struct timer_list flush_timer;
	unsigned long reset_jiffies;

	unsigned long long guid;
	unsigned max_receive;
	int link_speed;
	int config_rom_generation;

	spinlock_t lock; /* Take this lock when handling the lists in
			  * this struct. */
	struct fw_node *local_node;
	struct fw_node *root_node;
	struct fw_node *irm_node;
	u8 color; /* must be u8 to match the definition in struct fw_node */
	int gap_count;
	bool beta_repeaters_present;

	int index;

	struct list_head link;

	/* Work struct for BM duties. */
	struct delayed_work work;
	int bm_retries;
	int bm_generation;

	bool broadcast_channel_allocated;
	u32 broadcast_channel;
	u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
};

static inline struct fw_card *fw_card_get(struct fw_card *card)
{
	kref_get(&card->kref);

	return card;
}

void fw_card_release(struct kref *kref);

static inline void fw_card_put(struct fw_card *card)
{
	kref_put(&card->kref, fw_card_release);
}

struct fw_attribute_group {
	struct attribute_group *groups[2];
	struct attribute_group group;
	struct attribute *attrs[12];
};

enum fw_device_state {
	FW_DEVICE_INITIALIZING,
	FW_DEVICE_RUNNING,
	FW_DEVICE_GONE,
	FW_DEVICE_SHUTDOWN,
};

/*
 * Note, fw_device.generation always has to be read before fw_device.node_id.
 * Use SMP memory barriers to ensure this. Otherwise requests will be sent
 * to an outdated node_id if the generation was updated in the meantime due
 * to a bus reset.
 *
 * Likewise, fw-core will take care to update .node_id before .generation so
 * that whenever fw_device.generation is current WRT the actual bus generation,
 * fw_device.node_id is guaranteed to be current too.
 *
 * The same applies to fw_device.card->node_id vs. fw_device.generation.
 *
 * fw_device.config_rom and fw_device.config_rom_length may be accessed during
 * the lifetime of any fw_unit belonging to the fw_device, before device_del()
 * was called on the last fw_unit. Alternatively, they may be accessed while
 * holding fw_device_rwsem.
 */
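
/*
 * A minimal sketch of the read side described above (the local variables are
 * illustrative only):
 *
 *	generation = device->generation;
 *	smp_rmb();	(pairs with the .node_id-before-.generation update order)
 *	node_id = device->node_id;
 */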
struct fw_device {
	atomic_t state;
	struct fw_node *node;
	int node_id;
	int generation;
	unsigned max_speed;
	struct fw_card *card;
	struct device device;

	struct mutex client_list_mutex;
	struct list_head client_list;

	u32 *config_rom;
	size_t config_rom_length;
	int config_rom_retries;
	unsigned is_local:1;
	unsigned cmc:1;
	unsigned bc_implemented:2;

	struct delayed_work work;
	struct fw_attribute_group attribute_group;
};

static inline struct fw_device *fw_device(struct device *dev)
{
	return container_of(dev, struct fw_device, device);
}

static inline int fw_device_is_shutdown(struct fw_device *device)
{
	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}

static inline struct fw_device *fw_device_get(struct fw_device *device)
{
	get_device(&device->device);

	return device;
}

static inline void fw_device_put(struct fw_device *device)
{
	put_device(&device->device);
}

int fw_device_enable_phys_dma(struct fw_device *device);

/*
 * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
 */
struct fw_unit {
	struct device device;
	u32 *directory;
	struct fw_attribute_group attribute_group;
};

static inline struct fw_unit *fw_unit(struct device *dev)
{
	return container_of(dev, struct fw_unit, device);
}

static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
	get_device(&unit->device);

	return unit;
}

static inline void fw_unit_put(struct fw_unit *unit)
{
	put_device(&unit->device);
}

struct ieee1394_device_id;

struct fw_driver {
	struct device_driver driver;
	/* Called when the parent device sits through a bus reset. */
	void (*update)(struct fw_unit *unit);
	const struct ieee1394_device_id *id_table;
};

struct fw_packet;
struct fw_request;

typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
				     struct fw_card *card, int status);
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
					   void *data, size_t length,
					   void *callback_data);
/*
 * Important note: The callback must guarantee that either fw_send_response()
 * or kfree() is called on the @request.
 */
typedef void (*fw_address_callback_t)(struct fw_card *card,
				      struct fw_request *request,
				      int tcode, int destination, int source,
				      int generation, int speed,
				      unsigned long long offset,
				      void *data, size_t length,
				      void *callback_data);
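
/*
 * A minimal sketch of an address handler that satisfies the note above; the
 * function name and the unconditional RCODE_COMPLETE (from
 * linux/firewire-constants.h) are illustrative only:
 *
 *	static void handle_request(struct fw_card *card,
 *				   struct fw_request *request,
 *				   int tcode, int destination, int source,
 *				   int generation, int speed,
 *				   unsigned long long offset,
 *				   void *data, size_t length,
 *				   void *callback_data)
 *	{
 *		... process data ...
 *		fw_send_response(card, request, RCODE_COMPLETE);
 *	}
 */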

struct fw_packet {
	int speed;
	int generation;
	u32 header[4];
	size_t header_length;
	void *payload;
	size_t payload_length;
	dma_addr_t payload_bus;
	u32 timestamp;

	/*
	 * This callback is called when the packet transmission has
	 * completed; for successful transmission, the status code is
	 * the ack received from the destination, otherwise it's a
	 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
	 * The callback can be called from tasklet context and thus
	 * must never block.
	 */
	fw_packet_callback_t callback;
	int ack;
	struct list_head link;
	void *driver_data;
};

struct fw_transaction {
	int node_id; /* The generation is implied; it is always the current. */
	int tlabel;
	int timestamp;
	struct list_head link;

	struct fw_packet packet;

	/*
	 * The data passed to the callback is valid only during the
	 * callback.
	 */
	fw_transaction_callback_t callback;
	void *callback_data;
};

struct fw_address_handler {
	u64 offset;
	size_t length;
	fw_address_callback_t address_callback;
	void *callback_data;
	struct list_head link;
};

struct fw_address_region {
	u64 start;
	u64 end;
};

extern const struct fw_address_region fw_high_memory_region;

int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region);
void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode);
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
		     int tcode, int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data);
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length);
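
/*
 * A minimal sketch of a blocking quadlet read via fw_run_transaction(); the
 * tcode and rcode constants come from linux/firewire-constants.h and the
 * error handling is illustrative only:
 *
 *	__be32 data;
 *	int rcode;
 *
 *	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
 *				   device->node_id, device->generation,
 *				   device->max_speed,
 *				   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
 *				   &data, sizeof(data));
 *	if (rcode != RCODE_COMPLETE)
 *		return -EIO;
 */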

#endif /* _LINUX_FIREWIRE_H */