--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ ... @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		if (!async)
 			return -ENOMEM;
 
+		trace_regmap_async_write_start(map->dev, reg, val_len);
+
 		async->work_buf = kzalloc(map->format.buf_size,
 					  GFP_KERNEL | GFP_DMA);
 		if (!async->work_buf) {
@@ ... @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	struct regmap *map = async->map;
 	bool wake;
 
+	trace_regmap_async_io_complete(map->dev);
+
 	spin_lock(&map->async_lock);
 	list_del(&async->list);
@@ ... @@ int regmap_async_complete(struct regmap *map)
 	if (!map->bus->async_write)
 		return 0;
 
+	trace_regmap_async_complete_start(map->dev);
+
 	wait_event(map->async_waitq, regmap_async_is_done(map));
 
 	spin_lock_irqsave(&map->async_lock, flags);
 	ret = map->async_ret;
 	map->async_ret = 0;
 	spin_unlock_irqrestore(&map->async_lock, flags);
 
+	trace_regmap_async_complete_done(map->dev);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_async_complete);
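Taken together, the hooks bracket the asynchronous write path: regmap_async_write_start fires as a transfer is queued, regmap_async_io_complete fires from each transfer's completion callback, and regmap_async_complete_start/_done delimit the wait in regmap_async_complete(), so trace timestamps expose both per-transfer latency and the total flush time. A minimal driver-side sketch of the path being instrumented (hypothetical snippet, not part of this patch: REG_FIFO, buf and len are illustrative names, and it assumes the bus implements async_write so the asynchronous branch is taken):

	/* Queue a raw write without blocking; emits
	 * regmap_async_write_start as the transfer is submitted. */
	ret = regmap_raw_write_async(map, REG_FIFO, buf, len);
	if (ret)
		return ret;

	/* Flush: emits regmap_async_complete_start, then
	 * regmap_async_complete_done once every outstanding transfer
	 * has passed through regmap_async_complete_cb(), each of which
	 * emits regmap_async_io_complete. */
	ret = regmap_async_complete(map);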
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ ... @@
 );
 
+DECLARE_EVENT_CLASS(regmap_async,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__string(	name,		dev_name(dev)	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+	),
+
+	TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(regmap_block, regmap_async_write_start,
+
+	TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+	TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_io_complete,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_start,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_done,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
 #endif /* _TRACE_REGMAP_H */
 
 /* This part must be outside protection */
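A note on the definitions above: regmap_async_write_start is defined from the pre-existing regmap_block event class rather than the new regmap_async class because it also records the register and byte count, while the three completion-side events carry only the device name, which is all regmap_async stores. Once merged, the events can be enabled at runtime under events/regmap/ in the tracing debugfs directory; built-in code can also attach a probe directly. A minimal sketch of the latter (hypothetical: io_complete_probe is an illustrative name, and it assumes the caller is built in-tree where trace/events/regmap.h is reachable):

	#include <linux/device.h>
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <trace/events/regmap.h>

	/* Tracepoint probes take the private data pointer first,
	 * followed by the TP_PROTO arguments. */
	static void io_complete_probe(void *data, struct device *dev)
	{
		pr_info("regmap async I/O complete on %s\n", dev_name(dev));
	}

	static int __init io_complete_probe_init(void)
	{
		/* register_trace_regmap_async_io_complete() is generated
		 * by the DEFINE_EVENT() above. */
		return register_trace_regmap_async_io_complete(io_complete_probe,
							       NULL);
	}
	late_initcall(io_complete_probe_init);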